Compare commits


24 Commits

Author SHA1 Message Date
stasatdaglabs
7078dada5c Add a missing space to make go fmt happy. 2021-11-26 09:46:34 +02:00
Elichai Turkel
606b781ca0 Fix bug in RequiredDifficulty and release another version 2021-11-25 21:49:15 +02:00
Elichai Turkel
dbf18d8052 Hard fork - new genesis with the utxo set of the last block (#1856)
* UTXO dump of block 0fca37ca667c2d550a6c4416dad9717e50927128c424fa4edbebc436ab13aeef

* Activate HF immediately and change reward to 1000

* Change protocol version and datadir location

* Delete comments

* Fix zero hash to muhash zero hash in genesis utxo dump check

* Don't omit genesis as direct parent

* Fix tests

* Change subsidy to 500

* Don't assume genesis multiset is empty

* Fix BlockReward test

* Fix TestValidateAndInsertImportedPruningPoint test

* Fix pruning point genesis utxo set

* Fix tests related to mainnet utxo set

* Don't change the difficulty before you have a full window

* Fix TestBlockWindow tests

* Remove global utxo set variable, and persist mainnet utxo deserialization between runs

* Fix last tests

* Make peer banning opt-in

* Small fix for a test

* Fix go lint

* Fix Ori's review comments

* Change DAA score of genesis to checkpoint DAA score and fix all tests

* Fix the BlockLevel bits counting

* Fix some tests and make them run a little faster

* Change datadir name back to kaspa-mainnet and change db path from /data to /datadir

* Last changes for the release and change the version to 0.11.5

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Ori Newman <>
Co-authored-by: msutton <mikisiton2@gmail.com>
2021-11-25 20:18:43 +02:00
Michael Sutton
2a1b38ce7a Fix mergeset limit violation bug (#1855)
* Added a test which reproduces a bug where virtual's mergeset exceeds the mergeset limit -- this test currently fails

* Added a simple condition to fix the mergeset limit bug

* Format issue
2021-11-24 19:34:59 +02:00
Ori Newman
29c410d123 Change HF time 2021-11-22 11:27:17 +02:00
Ori Newman
6e6fabf956 Change HF time 2021-11-22 11:24:55 +02:00
Ori Newman
b04292c97a Don't serve parallel PruningPointAndItsAnticoneRequests 2021-11-22 09:56:30 +02:00
Ori Newman
765dd170e4 Optimizations and header size reduction hardfork (#1853)
* Modify DefaultTimeout to 120 seconds

A temporary workaround for nodes having trouble syncing (currently the download of pruning point related data during IBD takes more than 30 seconds)

* Cache existence in reachability store

* Cache block level in the header

* Fix IBD indication on submit block

* Add hardForkOmitGenesisFromParentsDAAScore logic

* Fix NumThreads bug in the wallet

* Get rid of ParentsAtLevel header method

* Fix a bug in BuildPruningPointProof

* Increase race detector timeout

* Add cache to BuildPruningPointProof

* Add comments and temp comment out go vet

* Fix ParentsAtLevel

* Don't fill empty parents

* Change HardForkOmitGenesisFromParentsDAAScore in fast netsync test

* Add --allow-submit-block-when-not-synced in stability tests

* Fix TestPruning

* Return fast tests

* Fix off by one error on kaspawallet

* Fetch only one block with trusted data at a time

* Update fork DAA score

* Don't ban for unexpected message type

* Fix tests

Co-authored-by: Michael Sutton <mikisiton2@gmail.com>
Co-authored-by: Ori Newman <>
2021-11-22 09:00:39 +02:00
Ori Newman
8e362845b3 Update to version 0.11.4 2021-11-21 01:52:56 +02:00
Ori Newman
5c1ba9170e Don't set blocks from the pruning point anticone as the header select tip (#1850)
* Decrease the dial timeout to 1 second

* Don't set blocks from the pruning point anticone as the header selected tip.

Co-authored-by: Kaspa Profiler <>
2021-11-13 18:59:20 +02:00
Elichai Turkel
9d8c555bdf Fix a bug in the matrix ranking algorithm (#1849)
* Fix a bug in the matrix ranking algorithm

* Add tests and benchmarks for matrix generation and ranking

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-11-13 17:51:11 +02:00
Ori Newman
a2f574eab8 Update to version 0.11.3 2021-11-13 17:18:40 +02:00
Kaspa Profiler
7bed86dc1b Update changelog.txt 2021-11-11 09:30:12 +02:00
Ori Newman
9b81f5145e Increase p2p msg size and reset utxo after ibd (#1847)
* Don't build unnecessary binaries

* Reset UTXO index after IBD

* Enlarge max msg size to 1gb

* Fix UTXO chunks logic

* Revert UTXO set override change

* Fix sendPruningPointUTXOSet

* Increase tests timeout to 20 minutes

Co-authored-by: Kaspa Profiler <>
2021-11-11 09:27:07 +02:00
Kaspa Profiler
cd8341ef57 Update to version 0.11.2 2021-11-10 23:19:25 +02:00
Ori Newman
ad8bdbed21 Update changelog (#1845)
Co-authored-by: Kaspa Profiler <>
2021-11-09 00:32:56 +02:00
Elichai Turkel
7cdceb6df0 Cache the miner state (#1844)
* Implement a MinerState to cache the matrix and friends

* Modify the miner and related code to use the new MinerCache

* Change MinerState to State

* Make go lint happy

Co-authored-by: Ori Newman <orinewman1@gmail.com>
Co-authored-by: Kaspa Profiler <>
2021-11-09 00:12:30 +02:00
stasatdaglabs
cc5248106e Update to version 0.11.1 2021-11-08 09:01:52 +02:00
Elichai Turkel
e3463b7268 Replace Keccak256 in oPoW with CSHAKE256 with domain separation (#1842)
* Replace keccak with CSHAKE256 in oPoW

* Add benchmarks to hash writers to compare blake2b to the CSHAKE

* Update genesis blocks

* Update tests

* Define genesis's block level to be the maximal one

* Add message to genesis coinbase

* Add comments to genesis coinbase

* Fix tests

Co-authored-by: Ori Newman <orinewman1@gmail.com>
2021-11-07 18:36:30 +02:00
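
For context on the commit above: cSHAKE256 is SHAKE256 extended with a customization string, so two uses of the same primitive yield unrelated digests — that is the domain separation the commit title refers to. A minimal sketch using golang.org/x/crypto/sha3; the domain strings here are illustrative, not necessarily the ones kaspad uses.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// sum hashes msg under a cSHAKE256 customization string ("domain").
func sum(domain string, msg []byte) []byte {
	h := sha3.NewCShake256(nil, []byte(domain))
	h.Write(msg)
	out := make([]byte, 32)
	h.Read(out)
	return out
}

func main() {
	msg := []byte("same input")
	// Identical input, different domains, unrelated digests:
	fmt.Printf("%x\n%x\n", sum("PoWHash", msg), sum("HeavyHash", msg))
}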
Ori Newman
a2173ef80a Switch PoW to a keccak heavyhash variant (#1841)
* Add another hash domain for HeavyHash

* Add a xoShiRo256PlusPlus implementation

* Add a HeavyHash implementation

* Replace our current PoW algorithm with oPoW

* Change the pow hash to keccak256

* Fix genesis

* Fix tests

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
2021-11-07 11:17:15 +02:00
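
The xoshiro256++ generator the commit above mentions is the public-domain PRNG by Blackman and Vigna; heavyhash-style PoW uses such a generator to derive its matrix from the block hash. A generic sketch of the step function, not kaspad's implementation:

package main

import "fmt"

type xoshiro256pp struct{ s [4]uint64 }

func rotl(x uint64, k uint) uint64 { return x<<k | x>>(64-k) }

// next advances the state and returns the next 64-bit output
// (the "++" scrambler: rotl(s0+s3, 23) + s0).
func (x *xoshiro256pp) next() uint64 {
	result := rotl(x.s[0]+x.s[3], 23) + x.s[0]
	t := x.s[1] << 17
	x.s[2] ^= x.s[0]
	x.s[3] ^= x.s[1]
	x.s[1] ^= x.s[2]
	x.s[0] ^= x.s[3]
	x.s[2] ^= t
	x.s[3] = rotl(x.s[3], 45)
	return result
}

func main() {
	rng := xoshiro256pp{s: [4]uint64{1, 2, 3, 4}} // seed must not be all zero
	fmt.Println(rng.next(), rng.next())
}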
stasatdaglabs
aeb4500b61 Add the daglabs-dev mainnet dnsseeder. (#1840) 2021-11-07 10:39:54 +02:00
Ori Newman
0a1daae319 Allow mainnet flag and raise wallet fee (#1838)
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
2021-11-07 10:04:27 +02:00
stasatdaglabs
131cd3357e Rename FixedSubsidySwitchHashRateDifference to FixedSubsidySwitchHashRateThreshold and set its value to 150GH/s. (#1837) 2021-11-07 09:33:39 +02:00
Ori Newman
ff72568d6b Fix pruning point anticone order (#1836)
* Send pruning point anticone in topological order
Fix a UTXO pagination bug
Lengthen the stabilization time for the last DAA test

* Extend "sudden hash rate drop" test length to 45 minutes

Co-authored-by: Kaspa Profiler <>
2021-11-07 08:21:34 +02:00
92 changed files with 1535 additions and 812 deletions


@@ -1,7 +1,7 @@
name: Build and upload assets
on:
release:
types: [published]
types: [ published ]
jobs:
build:
@@ -36,7 +36,7 @@ jobs:
# `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
# `-s -w` strips the binary to produce smaller size binaries
run: |
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/...
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
zip -r "${archive}" ./bin/*
@@ -47,7 +47,7 @@ jobs:
if: runner.os == 'Windows'
shell: bash
run: |
go build -v -ldflags="-s -w" -o bin/ ./...
go build -v -ldflags="-s -w" -o bin/ . ./cmd/...
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
powershell "Compress-Archive bin/* \"${archive}\""
@@ -57,7 +57,7 @@ jobs:
- name: Build on MacOS
if: runner.os == 'macOS'
run: |
go build -v -ldflags="-s -w" -o ./bin/ ./...
go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/...
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
zip -r "${archive}" ./bin/*


@@ -46,4 +46,4 @@ jobs:
run: |
git checkout "${{ env.run_on }}"
git status
go test -race ./...
go test -timeout 20m -race ./...


@@ -20,7 +20,10 @@ import (
"github.com/kaspanet/kaspad/version"
)
const leveldbCacheSizeMiB = 256
const (
leveldbCacheSizeMiB = 256
defaultDataDirname = "datadir2"
)
var desiredLimits = &limits.DesiredLimits{
FileLimitWant: 2048,
@@ -159,7 +162,7 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
// dbPath returns the path to the block database given a database type.
func databasePath(cfg *config.Config) string {
return filepath.Join(cfg.AppDir, "data")
return filepath.Join(cfg.AppDir, defaultDataDirname)
}
func removeDatabase(cfg *config.Config) error {


@@ -12,7 +12,7 @@ import (
const (
// ProtocolVersion is the latest protocol version this package supports.
ProtocolVersion uint32 = 1
ProtocolVersion uint32 = 3
// DefaultServices describes the default services that are supported by
// the server.


@@ -8,7 +8,7 @@ import (
// DefaultTimeout is the default duration to wait for enqueuing/dequeuing
// to/from routes.
const DefaultTimeout = 30 * time.Second
const DefaultTimeout = 120 * time.Second
// ErrPeerWithSameIDExists signifies that a peer with the same ID already exists.
var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists")


@@ -3,8 +3,12 @@ package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"runtime"
"sync/atomic"
)
// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow.
@@ -12,51 +16,80 @@ type PruningPointAndItsAnticoneRequestsContext interface {
Domain() domain.Domain
}
var isBusy uint32
// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends
// the pruning point and its anticone to the requesting peer.
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
blocks, err := context.Domain().Consensus().PruningPointAndItsAnticoneWithTrustedData()
if err != nil {
return err
}
for _, block := range blocks {
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(block))
err := func() error {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
}
defer atomic.StoreUint32(&isBusy, 0)
log.Debugf("Got request for pruning point and its anticone from %s", peer)
pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
if err != nil {
return err
}
msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
for i, header := range pruningPointHeaders {
msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
}
err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
if err != nil {
return err
}
pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
if err != nil {
return err
}
for _, blockHash := range pointAndItsAnticone {
err := sendBlockWithTrustedData(context, outgoingRoute, blockHash)
if err != nil {
return err
}
}
err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
return nil
}()
if err != nil {
return err
}
log.Debugf("Sent pruning point and its anticone to %s", peer)
}
}
func sendBlockWithTrustedData(context PruningPointAndItsAnticoneRequestsContext, outgoingRoute *router.Route, blockHash *externalapi.DomainHash) error {
blockWithTrustedData, err := context.Domain().Consensus().BlockWithTrustedData(blockHash)
if err != nil {
return err
}
err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedData(blockWithTrustedData))
if err != nil {
return err
}
runtime.GC()
return nil
}
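
The CompareAndSwap guard in the diff above is a general pattern for rejecting, rather than queueing, concurrent expensive requests (here: serving the pruning point anticone). A stripped-down sketch with hypothetical names, not kaspad's API:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var isBusy uint32

// serveOnce runs work unless another invocation is already in flight,
// in which case it fails fast instead of blocking.
func serveOnce(work func() error) error {
	if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
		return errors.New("busy with another request")
	}
	defer atomic.StoreUint32(&isBusy, 0) // clear the flag when done
	return work()
}

func main() {
	fmt.Println(serveOnce(func() error { return nil })) // <nil>
}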


@@ -70,7 +70,8 @@ func (flow *handleRequestPruningPointUTXOSetFlow) waitForRequestPruningPointUTXO
}
msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet)
if !ok {
return nil, protocolerrors.Errorf(true, "received unexpected message type. "+
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return nil, protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command())
}
return msgRequestPruningPointUTXOSet, nil
@@ -102,14 +103,17 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
return err
}
if len(pruningPointUTXOs) < step {
finished := len(pruningPointUTXOs) < step
if finished && chunksSent%ibdBatchSize != 0 {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
if len(pruningPointUTXOs) > 0 {
fromOutpoint = pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint
}
chunksSent++
// Wait for the peer to request more chunks every `ibdBatchSize` chunks
@@ -120,9 +124,17 @@ func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet(
}
_, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk)
if !ok {
return protocolerrors.Errorf(true, "received unexpected message type. "+
// TODO: Change to shouldBan: true once we fix the bug of getting redundant messages
return protocolerrors.Errorf(false, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command())
}
if finished {
log.Debugf("Finished sending UTXOs for pruning block %s",
msgRequestPruningPointUTXOSet.PruningPointHash)
return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks())
}
}
}
}
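
The hunk above fixes two edge cases in the chunked UTXO transfer: indexing the last element of a possibly-empty page, and emitting the "done" message only on a request boundary. The cursor-pagination pattern, reduced to a sketch with hypothetical types (the real flow also waits for the peer every ibdBatchSize chunks):

package main

import "fmt"

// sendPages streams items in fixed-size pages: a short page signals the end,
// and the cursor only advances when the page is non-empty (the old code
// indexed page[len(page)-1] unconditionally, which panics on an empty page).
func sendPages(get func(cursor int, step int) []int, emit func([]int)) {
	cursor, step := 0, 3
	for {
		page := get(cursor, step)
		emit(page)
		if len(page) > 0 {
			cursor = page[len(page)-1] + 1
		}
		if len(page) < step { // finished
			return
		}
	}
}

func main() {
	data := []int{1, 2, 3, 4, 5, 6, 7}
	get := func(cursor, step int) []int {
		var out []int
		for _, v := range data {
			if v >= cursor && len(out) < step {
				out = append(out, v)
			}
		}
		return out
	}
	sendPages(get, func(p []int) { fmt.Println(p) }) // [1 2 3] [4 5 6] [7]
}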


@@ -130,7 +130,7 @@ func (flow *handleRelayInvsFlow) downloadHeadersAndPruningUTXOSet(highHash *exte
return err
}
log.Debugf("Headers downloaded from peer %s", flow.peer)
log.Infof("Headers downloaded from peer %s", flow.peer)
highHashInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(highHash)
if err != nil {


@@ -15,7 +15,7 @@ import (
// maxProtocolVersion version is the maximum supported protocol
// version this kaspad node supports
const maxProtocolVersion = 1
const maxProtocolVersion = 3
// Peer holds data about a peer.
type Peer struct {


@@ -102,7 +102,7 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) {
if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) {
if !m.context.Config().DisableBanning && protocolErr.ShouldBan {
if m.context.Config().EnableBanning && protocolErr.ShouldBan {
log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause)
err := m.context.ConnectionManager().Ban(netConnection)


@@ -14,9 +14,14 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)
if context.ProtocolManager.IsIBDRunning() {
isSynced, err := context.ProtocolManager.ShouldMine()
if err != nil {
return nil, err
}
if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced {
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block not submitted - IBD is running"),
Error: appmessage.RPCErrorf("Block not submitted - node is not synced"),
RejectReason: appmessage.RejectReasonIsInIBD,
}, nil
}


@@ -15,13 +15,11 @@ golint -set_exit_status ./...
staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./...
go vet -composites=false $FLAGS ./...
go build $FLAGS -o kaspad .
if [ -n "${NO_PARALLEL}" ]
then
go test -parallel=1 $FLAGS ./...
go test -timeout 20m -parallel=1 $FLAGS ./...
else
go test $FLAGS ./...
go test -timeout 20m $FLAGS ./...
fi


@@ -1,3 +1,15 @@
Kaspad v0.11.2 - 2021-11-11
===========================
Bug fixes:
* Enlarge p2p max message size to 1gb
* Fix UTXO chunks logic
* Increase tests timeout to 20 minutes
Kaspad v0.11.1 - 2021-11-09
===========================
Non-breaking changes:
* Cache the miner state
Kaspad v0.10.2 - 2021-05-18
===========================
Non-breaking changes:


@@ -13,7 +13,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
)
@@ -143,20 +142,20 @@ func mineNextBlock(mineWhenNotSynced bool) *externalapi.DomainBlock {
// In the rare case where the nonce space is exhausted for a specific
// block, it'll keep looping the nonce until a new block template
// is discovered.
block := getBlockForMining(mineWhenNotSynced)
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
headerForMining.SetNonce(nonce)
block, state := getBlockForMining(mineWhenNotSynced)
state.Nonce = nonce
atomic.AddUint64(&hashesTried, 1)
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
if state.CheckProofOfWork() {
mutHeader := block.Header.ToMutable()
mutHeader.SetNonce(nonce)
block.Header = mutHeader.ToImmutable()
log.Infof("Found block %s with parents %s", consensushashing.BlockHash(block), block.Header.DirectParents())
return block
}
}
}
func getBlockForMining(mineWhenNotSynced bool) *externalapi.DomainBlock {
func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.State) {
tryCount := 0
const sleepTime = 500 * time.Millisecond
@@ -166,7 +165,7 @@ func getBlockForMining(mineWhenNotSynced bool) *externalapi.DomainBlock {
tryCount++
shouldLog := (tryCount-1)%10 == 0
template, isSynced := templatemanager.Get()
template, state, isSynced := templatemanager.Get()
if template == nil {
if shouldLog {
log.Info("Waiting for the initial template")
@@ -182,7 +181,7 @@ func getBlockForMining(mineWhenNotSynced bool) *externalapi.DomainBlock {
continue
}
return template
return template, state
}
}


@@ -3,23 +3,26 @@ package templatemanager
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"sync"
)
var currentTemplate *externalapi.DomainBlock
var currentState *pow.State
var isSynced bool
var lock = &sync.Mutex{}
// Get returns the template to work on
func Get() (*externalapi.DomainBlock, bool) {
func Get() (*externalapi.DomainBlock, *pow.State, bool) {
lock.Lock()
defer lock.Unlock()
// Shallow copy the block so when the user replaces the header it won't affect the template here.
if currentTemplate == nil {
return nil, false
return nil, nil, false
}
block := *currentTemplate
return &block, isSynced
state := *currentState
return &block, &state, isSynced
}
// Set sets the current template to work on
@@ -31,6 +34,7 @@ func Set(template *appmessage.GetBlockTemplateResponseMessage) error {
lock.Lock()
defer lock.Unlock()
currentTemplate = block
currentState = pow.NewState(block.Header.ToMutable())
isSynced = template.IsSynced
return nil
}
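
The Get above hands out shallow copies under a mutex so a miner can mutate the returned header (set the nonce) without racing a concurrent Set. The concurrency shape in isolation, with a hypothetical Template type standing in for the block and pow state:

package main

import "sync"

type Template struct{ Nonce uint64 }

var (
	lock    sync.Mutex
	current *Template
)

// Get returns a shallow copy: the caller may mutate its copy while Set
// concurrently replaces the shared template.
func Get() (*Template, bool) {
	lock.Lock()
	defer lock.Unlock()
	if current == nil {
		return nil, false
	}
	cp := *current
	return &cp, true
}

// Set swaps in a new template under the same lock.
func Set(t *Template) {
	lock.Lock()
	defer lock.Unlock()
	current = t
}

func main() {
	Set(&Template{})
	if t, ok := Get(); ok {
		t.Nonce = 42 // mutates the copy only
	}
}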


@@ -56,6 +56,7 @@ func create(conf *createConfig) error {
}
file := keys.File{
Version: keys.LastVersion,
EncryptedMnemonics: encryptedMnemonics,
ExtendedPublicKeys: extendedPublicKeys,
MinimumSignatures: conf.MinimumSignatures,


@@ -19,7 +19,7 @@ func Connect(address string) (pb.KaspawalletdClient, func(), error) {
conn, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return nil, nil, errors.New("kaspactl daemon is not running, start it with `kaspactl start-daemon`")
return nil, nil, errors.New("kaspawallet daemon is not running, start it with `kaspawallet start-daemon`")
}
return nil, nil, err
}


@@ -28,7 +28,7 @@ func (s *server) CreateUnsignedTransaction(_ context.Context, request *pb.Create
}
// TODO: Implement a better fee estimation mechanism
const feePerInput = 1000
const feePerInput = 10000
selectedUTXOs, changeSompi, err := s.selectUTXOs(request.Amount, feePerInput)
if err != nil {
return nil, err


@@ -97,7 +97,7 @@ func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, erro
return nil, err
}
aead, err := getAEAD(password, salt)
aead, err := getAEAD(defaultNumThreads, password, salt)
if err != nil {
return nil, err
}


@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util"
@@ -22,6 +23,9 @@ var (
defaultAppDir = util.AppDir("kaspawallet", false)
)
// LastVersion is the most up to date file format version
const LastVersion = 1
func defaultKeysFile(netParams *dagconfig.Params) string {
return filepath.Join(defaultAppDir, netParams.Name, "keys.json")
}
@@ -32,6 +36,8 @@ type encryptedPrivateKeyJSON struct {
}
type keysFileJSON struct {
Version uint32 `json:"version"`
NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`.
EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"`
ExtendedPublicKeys []string `json:"publicKeys"`
MinimumSignatures uint32 `json:"minimumSignatures"`
@@ -49,6 +55,8 @@ type EncryptedMnemonic struct {
// File holds all the data related to the wallet keys
type File struct {
Version uint32
NumThreads uint8 // This field is ignored for versions different from 0
EncryptedMnemonics []*EncryptedMnemonic
ExtendedPublicKeys []string
MinimumSignatures uint32
@@ -69,6 +77,8 @@ func (d *File) toJSON() *keysFileJSON {
}
return &keysFileJSON{
Version: d.Version,
NumThreads: d.NumThreads,
EncryptedPrivateKeys: encryptedPrivateKeysJSON,
ExtendedPublicKeys: d.ExtendedPublicKeys,
MinimumSignatures: d.MinimumSignatures,
@@ -80,6 +90,8 @@ func (d *File) toJSON() *keysFileJSON {
}
func (d *File) fromJSON(fileJSON *keysFileJSON) error {
d.Version = fileJSON.Version
d.NumThreads = fileJSON.NumThreads
d.MinimumSignatures = fileJSON.MinimumSignatures
d.ECDSA = fileJSON.ECDSA
d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys
@@ -181,10 +193,20 @@ func (d *File) DecryptMnemonics(cmdLinePassword string) ([]string, error) {
if len(password) == 0 {
password = getPassword("Password:")
}
var numThreads uint8
if len(d.EncryptedMnemonics) > 0 {
var err error
numThreads, err = d.numThreads(password)
if err != nil {
return nil, err
}
}
privateKeys := make([]string, len(d.EncryptedMnemonics))
for i, encryptedPrivateKey := range d.EncryptedMnemonics {
var err error
privateKeys[i], err = decryptMnemonic(encryptedPrivateKey, password)
privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, password)
if err != nil {
return nil, err
}
@@ -278,13 +300,73 @@ func (d *File) Save() error {
return nil
}
func getAEAD(password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32)
const defaultNumThreads = 8
func (d *File) numThreads(password []byte) (uint8, error) {
// There's a bug in v0 wallets where the number of threads
// was determined by the number of logical CPUs on the machine,
// which made the authentication non-deterministic across platforms.
// In order to solve it we introduce v1 where the number of threads
// is constant, and brute force the number of threads in v0. After we
// find the right amount via brute force we save the result to the file.
if d.Version != 0 {
return defaultNumThreads, nil
}
if d.NumThreads != 0 {
return d.NumThreads, nil
}
numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0].salt)
if err != nil {
return 0, err
}
d.NumThreads = numThreads
err = d.Save()
if err != nil {
return 0, err
}
return numThreads, nil
}
func (d *File) detectNumThreads(password, salt []byte) (uint8, error) {
numCPU := uint8(runtime.NumCPU())
_, err := getAEAD(numCPU, password, salt)
if err != nil {
if !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return numCPU, nil
}
for i := uint8(1); ; i++ {
if i == numCPU {
continue
}
_, err := getAEAD(i, password, salt)
if err != nil {
const maxTries = 32
if i > maxTries || !strings.Contains(err.Error(), "message authentication failed") {
return 0, err
}
} else {
return i, nil
}
}
}
func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) {
key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32)
return chacha20poly1305.NewX(key)
}
func decryptMnemonic(encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(password, encryptedPrivateKey.salt)
func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) {
aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt)
if err != nil {
return "", err
}
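
To make the v0 wallet bug described in the numThreads comment concrete: argon2id mixes its parallelism parameter into the derived key, so the same password and salt produce different keys on a 4-core and an 8-core machine. A self-contained demonstration (assumes golang.org/x/crypto/argon2; values are made up):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	password := []byte("hunter2")
	salt := []byte("0123456789abcdef")
	// Same password and salt, different thread counts: different keys.
	// This is why v0 wallets (threads = runtime.NumCPU()) decrypt on one
	// machine and fail authentication on another, and why v1 pins the count.
	k4 := argon2.IDKey(password, salt, 1, 64*1024, 4, 32)
	k8 := argon2.IDKey(password, salt, 1, 64*1024, 8, 32)
	fmt.Println("keys equal:", bytes.Equal(k4, k8)) // keys equal: false
}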


@@ -137,11 +137,18 @@ func (s *consensus) Init(skipAddingGenesis bool) error {
return nil
}
func (s *consensus) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.PruningPointAndItsAnticoneWithTrustedData()
return s.pruningManager.PruningPointAndItsAnticone()
}
func (s *consensus) BlockWithTrustedData(blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.pruningManager.BlockWithTrustedData(model.NewStagingArea(), blockHash)
}
// BuildBlock builds a block over the current state, with the transactions
@@ -716,7 +723,10 @@ func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) {
func (s *consensus) ResolveVirtual() error {
// In order to prevent a situation where the consensus lock is held for too long, we
// release the lock each time we resolve 100 blocks.
for {
for i := 0; ; i++ {
if i%10 == 0 {
log.Infof("Resolving virtual. This may take some time...")
}
var isCompletelyResolved bool
var err error
func() {
@@ -730,6 +740,7 @@ func (s *consensus) ResolveVirtual() error {
}
if isCompletelyResolved {
log.Infof("Resolved virtual")
return nil
}
}
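
The loop above wraps each resolution step in a closure so the deferred unlock fires per iteration, letting other consensus users take the lock between batches. The shape in miniature, with a hypothetical step function:

package main

import (
	"fmt"
	"sync"
)

// resolveAll does work in locked batches, releasing the mutex between
// iterations so other lock holders are not starved during a long resolve.
func resolveAll(mu *sync.Mutex, step func() (done bool)) {
	for i := 0; ; i++ {
		if i%10 == 0 {
			fmt.Println("resolving virtual, this may take some time...")
		}
		var done bool
		func() {
			mu.Lock()
			defer mu.Unlock() // unlocks at the end of each iteration
			done = step()
		}()
		if done {
			return
		}
	}
}

func main() {
	var mu sync.Mutex
	n := 0
	resolveAll(&mu, func() bool { n++; return n == 25 })
}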


@@ -6,7 +6,9 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
)
var reachabilityDataBucketName = []byte("reachability-data")
@@ -50,6 +52,8 @@ func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool
return rds.stagingShard(stagingArea).isStaged()
}
var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found")
// ReachabilityData returns the reachabilityData associated with the given blockHash
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
stagingShard := rds.stagingShard(stagingArea)
@@ -59,10 +63,16 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
}
if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
if reachabilityData == nil {
return nil, errNotFound
}
return reachabilityData.(model.ReachabilityData), nil
}
reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
if database.IsNotFoundError(err) {
rds.reachabilityDataCache.Add(blockHash, nil)
}
if err != nil {
return nil, err
}
@@ -76,17 +86,15 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
}
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
stagingShard := rds.stagingShard(stagingArea)
if _, ok := stagingShard.reachabilityData[*blockHash]; ok {
return true, nil
_, err := rds.ReachabilityData(dbContext, stagingArea, blockHash)
if database.IsNotFoundError(err) {
return false, nil
}
if err != nil {
return false, err
}
if rds.reachabilityDataCache.Has(blockHash) {
return true, nil
}
return dbContext.Has(rds.reachabilityDataBlockHashAsKey(blockHash))
return true, nil
}
// ReachabilityReindexRoot returns the current reachability reindex root
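
The diff above adds negative caching: a nil sentinel is stored in the cache on a database miss, so repeated lookups for absent blocks stop hitting the database, and HasReachabilityData is rewritten on top of the cached read path. A toy version of the idea, with a plain map standing in for the LRU cache:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type store struct {
	cache map[string]any // nil value = cached miss
	db    map[string][]byte
}

func (s *store) get(key string) ([]byte, error) {
	if v, ok := s.cache[key]; ok {
		if v == nil {
			return nil, errNotFound // answered from the negative cache
		}
		return v.([]byte), nil
	}
	data, ok := s.db[key]
	if !ok {
		s.cache[key] = nil // remember the miss
		return nil, errNotFound
	}
	s.cache[key] = data
	return data, nil
}

// has is derived from get, mirroring the rewritten HasReachabilityData.
func (s *store) has(key string) (bool, error) {
	_, err := s.get(key)
	if errors.Is(err, errNotFound) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	s := &store{cache: map[string]any{}, db: map[string][]byte{"a": {1}}}
	fmt.Println(s.has("a")) // true <nil>
	fmt.Println(s.has("b")) // false <nil> (second call served from cache)
}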


@@ -4,6 +4,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder"
parentssanager "github.com/kaspanet/kaspad/domain/consensus/processes/parentsmanager"
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningproofmanager"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"io/ioutil"
@@ -158,12 +159,16 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
dagTraversalManager := dagTraversalManagers[0]
// Processes
parentsManager := parentssanager.New(config.GenesisHash)
blockParentBuilder := blockparentbuilder.New(
dbManager,
blockHeaderStore,
dagTopologyManager,
parentsManager,
reachabilityDataStore,
pruningStore,
config.GenesisHash,
)
pastMedianTimeManager := f.pastMedianTimeConsructor(
config.TimestampDeviationTolerance,
@@ -206,7 +211,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
config.CoinbasePayloadScriptPublicKeyMaxLength,
config.GenesisHash,
config.FixedSubsidySwitchPruningPointInterval,
config.FixedSubsidySwitchHashRateDifference,
config.FixedSubsidySwitchHashRateThreshold,
dagTraversalManager,
ghostdagDataStore,
acceptanceDataStore,
@@ -316,6 +321,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
finalityManager,
blockParentBuilder,
pruningManager,
parentsManager,
pruningStore,
blockStore,
@@ -403,6 +409,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas
ghostdagManagers,
reachabilityManagers,
dagTraversalManagers,
parentsManager,
ghostdagDataStores,
pruningStore,
@@ -581,7 +588,7 @@ func dagStores(config *Config,
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches)
} else {
blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 200, false)
reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, 86400, false)
ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false)
}
}


@@ -16,8 +16,8 @@ import (
func TestFinality(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Set finalityInterval to 50 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 50 * consensusConfig.TargetTimePerBlock
// Set finalityInterval to 20 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 20 * consensusConfig.TargetTimePerBlock
factory := consensus.NewFactory()
consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestFinality")
@@ -180,7 +180,8 @@ func TestFinality(t *testing.T) {
func TestBoundedMergeDepth(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Set finalityInterval to 50 blocks, so that test runs quickly
consensusConfig.FinalityDuration = 50 * consensusConfig.TargetTimePerBlock
consensusConfig.K = 5
consensusConfig.FinalityDuration = 7 * consensusConfig.TargetTimePerBlock
finalityInterval := int(consensusConfig.FinalityDepth())
if int(consensusConfig.K) >= finalityInterval {


@@ -58,7 +58,6 @@ type BlockHeader interface {
type BaseBlockHeader interface {
Version() uint16
Parents() []BlockLevelParents
ParentsAtLevel(level int) BlockLevelParents
DirectParents() BlockLevelParents
HashMerkleRoot() *DomainHash
AcceptedIDMerkleRoot() *DomainHash
@@ -70,6 +69,7 @@ type BaseBlockHeader interface {
BlueScore() uint64
BlueWork() *big.Int
PruningPoint() *DomainHash
BlockLevel() int
Equal(other BaseBlockHeader) bool
}


@@ -25,7 +25,8 @@ type Consensus interface {
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
PruningPoint() (*DomainHash, error)
PruningPointHeaders() ([]BlockHeader, error)
PruningPointAndItsAnticoneWithTrustedData() ([]*BlockWithTrustedData, error)
PruningPointAndItsAnticone() ([]*DomainHash, error)
BlockWithTrustedData(blockHash *DomainHash) (*BlockWithTrustedData, error)
ClearImportedPruningPointData() error
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error
ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error


@@ -5,5 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// BlockParentBuilder exposes a method to build super-block parents for
// a given set of direct parents
type BlockParentBuilder interface {
BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
BuildParents(stagingArea *StagingArea,
daaScore uint64,
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error)
}


@@ -0,0 +1,9 @@
package model
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// ParentsManager is a wrapper around header parents that replaces empty parents with genesis when needed.
type ParentsManager interface {
ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents
Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents
}


@@ -12,6 +12,7 @@ type PruningManager interface {
AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error
UpdatePruningPointIfRequired() error
PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error
PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error)
PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error)
ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
BlockWithTrustedData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error)
}


@@ -183,10 +183,16 @@ func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingAr
func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction,
newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) {
parents, err := bb.newBlockParents(stagingArea)
daaScore, err := bb.newBlockDAAScore(stagingArea)
if err != nil {
return nil, err
}
parents, err := bb.newBlockParents(stagingArea, daaScore)
if err != nil {
return nil, err
}
timeInMilliseconds, err := bb.newBlockTime(stagingArea)
if err != nil {
return nil, err
@@ -204,10 +210,6 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
if err != nil {
return nil, err
}
daaScore, err := bb.newBlockDAAScore(stagingArea)
if err != nil {
return nil, err
}
blueWork, err := bb.newBlockBlueWork(stagingArea)
if err != nil {
return nil, err
@@ -233,12 +235,12 @@ func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions
), nil
}
func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]externalapi.BlockLevelParents, error) {
func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) {
virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash)
if err != nil {
return nil, err
}
return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents)
return bb.blockParentBuilder.BuildParents(stagingArea, daaScore, virtualBlockRelations.Parents)
}
func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) {


@@ -83,7 +83,7 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre
return nil, err
}
parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes)
parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes)
if err != nil {
return nil, err
}


@@ -5,7 +5,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/pkg/errors"
)
@@ -13,8 +12,11 @@ type blockParentBuilder struct {
databaseContext model.DBManager
blockHeaderStore model.BlockHeaderStore
dagTopologyManager model.DAGTopologyManager
parentsManager model.ParentsManager
reachabilityDataStore model.ReachabilityDataStore
pruningStore model.PruningStore
genesisHash *externalapi.DomainHash
}
// New creates a new instance of a BlockParentBuilder
@@ -22,20 +24,27 @@ func New(
databaseContext model.DBManager,
blockHeaderStore model.BlockHeaderStore,
dagTopologyManager model.DAGTopologyManager,
parentsManager model.ParentsManager,
reachabilityDataStore model.ReachabilityDataStore,
pruningStore model.PruningStore,
genesisHash *externalapi.DomainHash,
) model.BlockParentBuilder {
return &blockParentBuilder{
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
databaseContext: databaseContext,
blockHeaderStore: blockHeaderStore,
dagTopologyManager: dagTopologyManager,
parentsManager: parentsManager,
reachabilityDataStore: reachabilityDataStore,
pruningStore: pruningStore,
genesisHash: genesisHash,
}
}
func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) {
// Later on we'll mutate the direct parent hashes, so we first clone them.
directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes))
@@ -93,7 +102,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
// all the block levels they occupy
for _, directParentHeader := range directParentHeaders {
directParentHash := consensushashing.HeaderHash(directParentHeader)
blockLevel := pow.BlockLevel(directParentHeader)
blockLevel := directParentHeader.BlockLevel()
for i := 0; i <= blockLevel; i++ {
if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists {
candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -116,7 +125,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
for _, directParentHeader := range directParentHeaders {
for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() {
for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) {
isEmptyLevel := false
if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists {
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash)
@@ -145,7 +154,7 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
} else {
for childHash, childHeader := range virtualGenesisChildrenHeaders {
childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse
if childHeader.ParentsAtLevel(blockLevel).Contains(parent) {
if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) {
referenceBlocks = append(referenceBlocks, &childHash)
}
}
@@ -203,14 +212,21 @@ func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea,
}
}
parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap))
parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap))
for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ {
if blockLevel > 0 {
if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 {
break
}
}
levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel]))
for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] {
block := block // Assign to a new pointer to avoid `range` pointer reuse
levelBlocks = append(levelBlocks, &block)
}
parents[blockLevel] = levelBlocks
parents = append(parents, levelBlocks)
}
return parents, nil
}


@@ -1,12 +1,20 @@
package blockprocessor
import (
"bytes"
"compress/gzip"
"github.com/kaspanet/go-muhash"
// we need to embed the utxoset of mainnet genesis here
_ "embed"
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/util/staging"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/kaspanet/kaspad/util/staging"
"io"
"sync"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
@@ -16,6 +24,9 @@ import (
"github.com/pkg/errors"
)
//go:embed resources/utxos.gz
var utxoDumpFile []byte
func (bp *blockProcessor) setBlockStatusAfterBlockValidation(
stagingArea *model.StagingArea, block *externalapi.DomainBlock, isPruningPoint bool) error {
@@ -102,7 +113,33 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
}
}
err = bp.headerTipsManager.AddHeaderTip(stagingArea, blockHash)
shouldAddHeaderSelectedTip := false
if !hasHeaderSelectedTip {
shouldAddHeaderSelectedTip = true
} else {
pruningPoint, err := bp.pruningStore.PruningPoint(bp.databaseContext, stagingArea)
if err != nil {
return nil, err
}
isInSelectedChainOfPruningPoint, err := bp.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, pruningPoint, blockHash)
if err != nil {
return nil, err
}
// Don't set blocks in the anticone of the pruning point as header selected tip.
shouldAddHeaderSelectedTip = isInSelectedChainOfPruningPoint
}
if shouldAddHeaderSelectedTip {
// Don't set blocks in the anticone of the pruning point as header selected tip.
err = bp.headerTipsManager.AddHeaderTip(stagingArea, blockHash)
if err != nil {
return nil, err
}
}
err = bp.ifGenesisSetUtxoSet(block)
if err != nil {
return nil, err
}
@@ -194,6 +231,93 @@ func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea,
}, nil
}
var mainnetGenesisUTXOSet externalapi.UTXODiff
var mainnetGenesisMultiSet model.Multiset
var mainnetGenesisOnce sync.Once
var mainnetGenesisErr error
func deserializeMainnetUTXOSet() (externalapi.UTXODiff, model.Multiset, error) {
mainnetGenesisOnce.Do(func() {
toAdd := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
mainnetGenesisMultiSet = multiset.New()
file, err := gzip.NewReader(bytes.NewReader(utxoDumpFile))
if err != nil {
mainnetGenesisErr = err
return
}
for i := 0; ; i++ {
size := make([]byte, 1)
_, err = io.ReadFull(file, size)
if errors.Is(err, io.EOF) {
break
}
if err != nil {
mainnetGenesisErr = err
return
}
serializedUTXO := make([]byte, size[0])
_, err = io.ReadFull(file, serializedUTXO)
if err != nil {
mainnetGenesisErr = err
return
}
mainnetGenesisMultiSet.Add(serializedUTXO)
entry, outpoint, err := utxo.DeserializeUTXO(serializedUTXO)
if err != nil {
mainnetGenesisErr = err
return
}
toAdd[*outpoint] = entry
}
mainnetGenesisUTXOSet, mainnetGenesisErr = utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(toAdd), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)))
})
return mainnetGenesisUTXOSet, mainnetGenesisMultiSet, mainnetGenesisErr
}
func (bp *blockProcessor) ifGenesisSetUtxoSet(block *externalapi.DomainBlock) error {
isGenesis := len(block.Header.DirectParents()) == 0
if !isGenesis {
return nil
}
blockHash := consensushashing.BlockHash(block)
if !block.Header.UTXOCommitment().Equal(externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray())) {
log.Infof("Loading checkpoint UTXO set")
diff, utxoSetMultiset, err := deserializeMainnetUTXOSet()
if err != nil {
return err
}
log.Infof("Finished loading checkpoint UTXO set")
utxoSetHash := utxoSetMultiset.Hash()
if !utxoSetHash.Equal(block.Header.UTXOCommitment()) {
return errors.New("Invalid UTXO set dump")
}
area := model.NewStagingArea()
bp.consensusStateStore.StageVirtualUTXODiff(area, diff)
bp.utxoDiffStore.Stage(area, blockHash, diff, nil)
// commit the multiset of genesis
bp.multisetStore.Stage(area, blockHash, utxoSetMultiset)
err = staging.CommitAllChanges(bp.databaseContext, area)
if err != nil {
return err
}
} else {
// if it's genesis but has an empty muhash then commit an empty multiset and an empty diff
area := model.NewStagingArea()
bp.consensusStateStore.StageVirtualUTXODiff(area, utxo.NewUTXODiff())
bp.utxoDiffStore.Stage(area, blockHash, utxo.NewUTXODiff(), nil)
bp.multisetStore.Stage(area, blockHash, multiset.New())
err := staging.CommitAllChanges(bp.databaseContext, area)
if err != nil {
return err
}
}
return nil
}
func isHeaderOnlyBlock(block *externalapi.DomainBlock) bool {
return len(block.Transactions) == 0
}
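
The embedded dump read by deserializeMainnetUTXOSet above appears to be a gzip stream of length-prefixed records: one size byte, then that many bytes of serialized UTXO. A self-contained reader for that inferred layout (the record format is deduced from the loop above, not from a spec):

package main

import (
	"bytes"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
)

// readRecords decodes a gzip stream of [1-byte length][payload] records.
func readRecords(r io.Reader) ([][]byte, error) {
	gz, err := gzip.NewReader(r)
	if err != nil {
		return nil, err
	}
	defer gz.Close()
	var records [][]byte
	for {
		size := make([]byte, 1)
		if _, err := io.ReadFull(gz, size); err != nil {
			if errors.Is(err, io.EOF) {
				return records, nil // clean end of stream
			}
			return nil, err
		}
		payload := make([]byte, size[0])
		if _, err := io.ReadFull(gz, payload); err != nil {
			return nil, err
		}
		records = append(records, payload)
	}
}

func main() {
	// Round-trip a single record to show the format.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write([]byte{3, 'a', 'b', 'c'}) // length byte, then payload
	w.Close()
	recs, err := readRecords(&buf)
	fmt.Println(recs, err) // [[97 98 99]] <nil>
}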


@@ -87,13 +87,18 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
t.Fatalf("PruningPointHeaders: %+v", err)
}
pruningPointAndItsAnticoneWithTrustedData, err := tcSyncer.PruningPointAndItsAnticoneWithTrustedData()
pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone()
if err != nil {
t.Fatalf("PruningPointAndItsAnticoneWithTrustedData: %+v", err)
t.Fatalf("PruningPointAndItsAnticone: %+v", err)
}
for _, blockWithTrustedData := range pruningPointAndItsAnticoneWithTrustedData {
_, err := synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
for _, blockHash := range pruningPointAndItsAnticone {
blockWithTrustedData, err := tcSyncer.BlockWithTrustedData(blockHash)
if err != nil {
return
}
_, err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false)
if err != nil {
t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err)
}
@@ -135,10 +140,21 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) {
}
}
pruningPointUTXOs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, nil, 1000)
if err != nil {
t.Fatalf("GetPruningPointUTXOs: %+v", err)
var fromOutpoint *externalapi.DomainOutpoint
var pruningPointUTXOs []*externalapi.OutpointAndUTXOEntryPair
const step = 100_000
for {
outpointAndUTXOEntryPairs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step)
if err != nil {
t.Fatalf("GetPruningPointUTXOs: %+v", err)
}
fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint
pruningPointUTXOs = append(pruningPointUTXOs, outpointAndUTXOEntryPairs...)
if len(outpointAndUTXOEntryPairs) < step {
break
}
}
err = synceeStaging.AppendImportedPruningPointUTXOs(pruningPointUTXOs)
if err != nil {
t.Fatalf("AppendImportedPruningPointUTXOs: %+v", err)
@@ -502,7 +518,7 @@ func TestGetPruningPointUTXOs(t *testing.T) {
// Get pruning point UTXOs in a loop
var allOutpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair
step := 100
const step = 100_000
var fromOutpoint *externalapi.DomainOutpoint
for {
outpointAndUTXOEntryPairs, err := testConsensus.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step)
@@ -517,11 +533,17 @@ func TestGetPruningPointUTXOs(t *testing.T) {
}
}
const mainnetUTXOSize = 1232643
expected := len(outputs) + 1
if consensusConfig.Name == "kaspa-mainnet" {
expected += mainnetUTXOSize
}
// Make sure the length of the UTXOs is exactly spendingTransaction.Outputs + 1 coinbase
// output (includingBlock's coinbase)
if len(allOutpointAndUTXOEntryPairs) != len(outputs)+1 {
if len(allOutpointAndUTXOEntryPairs) != expected {
t.Fatalf("Returned an unexpected amount of UTXOs. "+
"Want: %d, got: %d", len(outputs)+2, len(allOutpointAndUTXOEntryPairs))
"Want: %d, got: %d", expected, len(allOutpointAndUTXOEntryPairs))
}
// Make sure all spendingTransaction.Outputs are in the returned UTXOs


@@ -4,6 +4,8 @@ import (
"bytes"
"math"
"math/big"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -21,6 +23,31 @@ import (
"github.com/pkg/errors"
)
func TestBlockValidator_ValidateBodyInIsolation(t *testing.T) {
tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){
CheckBlockSanity,
CheckBlockHashMerkleRoot,
BlockMass,
CheckBlockDuplicateTransactions,
CheckBlockContainsOnlyOneCoinbase,
CheckBlockDoubleSpends,
CheckFirstBlockTransactionIsCoinbase,
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestChainedTransactions")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for _, test := range tests {
testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
t.Run(testName, func(t *testing.T) {
test(t, tc, consensusConfig)
})
}
})
}
func TestChainedTransactions(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.BlockCoinbaseMaturity = 0
@@ -89,47 +116,39 @@ func TestChainedTransactions(t *testing.T) {
})
}
// TestCheckBlockSanity tests the CheckBlockSanity function to ensure it works
// CheckBlockSanity tests the CheckBlockSanity function to ensure it works
// as expected.
func TestCheckBlockSanity(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockSanity")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
blockHash := consensushashing.BlockHash(&exampleValidBlock)
if len(exampleValidBlock.Transactions) < 3 {
t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
}
func CheckBlockSanity(t *testing.T, tc testapi.TestConsensus, _ *consensus.Config) {
blockHash := consensushashing.BlockHash(&exampleValidBlock)
if len(exampleValidBlock.Transactions) < 3 {
t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions))
}
stagingArea := model.NewStagingArea()
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Fatalf("Failed validating block in isolation: %v", err)
}
err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Fatalf("Failed validating block in isolation: %v", err)
}
// Test with block with wrong transactions sorting order
blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder)
tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
}
// Test with block with wrong transactions sorting order
blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder)
tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) {
t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err)
}
// Test a block with invalid parents order
// We no longer require blocks to have ordered parents
blockHash = consensushashing.BlockHash(&unOrderedParentsBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)
}
})
// Test a block with invalid parents order
// We no longer require blocks to have ordered parents
blockHash = consensushashing.BlockHash(&unOrderedParentsBlock)
tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err != nil {
t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err)
}
}
var unOrderedParentsBlock = externalapi.DomainBlock{
@@ -1025,59 +1044,41 @@ var blockWithWrongTxOrder = externalapi.DomainBlock{
},
}
func TestCheckBlockHashMerkleRoot(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockHashMerkleRoot")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
func CheckBlockHashMerkleRoot(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
blockWithInvalidMerkleRoot := block.Clone()
blockWithInvalidMerkleRoot.Transactions[0].Version += 1
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
_, err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true)
if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) {
t.Fatalf("Unexpected error: %+v", err)
}
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
})
// Check that a block with invalid merkle root is not marked as invalid
// and can be re-added with the right transactions.
_, err = tc.ValidateAndInsertBlock(block, true)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
}
func TestBlockMass(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockMass")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
func BlockMass(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) {
t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) {
t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err)
}
}
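The mass rule tested above is an aggregate bound over per-transaction masses. A minimal sketch of the check, with `massLimit` and the mass values as illustrative stand-ins rather than kaspad's actual mass accounting:

package main

import (
	"errors"
	"fmt"
)

var errBlockMassTooHigh = errors.New("block mass exceeds limit")

// checkBlockMass sums per-transaction masses, guarding against uint64 overflow.
func checkBlockMass(txMasses []uint64, massLimit uint64) error {
	var total uint64
	for _, m := range txMasses {
		if total+m < total { // overflow guard
			return errBlockMassTooHigh
		}
		total += m
		if total > massLimit {
			return errBlockMassTooHigh
		}
	}
	return nil
}

func main() {
	fmt.Println(checkBlockMass([]uint64{400_000, 300_000}, 500_000)) // block mass exceeds limit
}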
func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1113,30 +1114,20 @@ func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx})
}
func TestCheckBlockDuplicateTransactions(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckBlockDuplicateTransactions(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockDuplicateTransactions")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err)
}
}
func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1170,30 +1161,20 @@ func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc tes
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx, tx})
}
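Duplicate-transaction detection, as exercised by building a block with `{tx, tx}` above, reduces to a set-membership check over transaction IDs. A sketch with string IDs standing in for kaspad's transaction IDs:

package main

import "fmt"

// firstDuplicate returns the first repeated ID and whether one exists.
func firstDuplicate(txIDs []string) (string, bool) {
	seen := make(map[string]struct{}, len(txIDs))
	for _, id := range txIDs {
		if _, ok := seen[id]; ok {
			return id, true
		}
		seen[id] = struct{}{}
	}
	return "", false
}

func main() {
	fmt.Println(firstDuplicate([]string{"coinbase", "tx", "tx"})) // tx true
}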
func TestCheckBlockContainsOnlyOneCoinbase(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckBlockContainsOnlyOneCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockContainsOnlyOneCoinbase")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err)
}
}
func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1227,30 +1208,20 @@ func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc test
return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx})
}
func TestCheckBlockDoubleSpends(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckBlockDoubleSpends(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := initBlockWithDoubleSpends(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockDoubleSpends")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block, _, err := initBlockWithDoubleSpends(consensusConfig, tc)
if err != nil {
t.Fatalf("Error BuildBlockWithParents : %+v", err)
}
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err)
}
})
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err)
}
}
func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) {
@@ -1303,27 +1274,18 @@ func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.Tes
&emptyCoinbase, []*externalapi.DomainTransaction{tx, txSameOutpoint})
}
func TestCheckFirstBlockTransactionIsCoinbase(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
func CheckFirstBlockTransactionIsCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckFirstBlockTransactionIsCoinbase")
if err != nil {
t.Fatalf("Error setting up tc: %+v", err)
}
defer teardown(false)
block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig)
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig)
blockHash := consensushashing.BlockHash(block)
stagingArea := model.NewStagingArea()
tc.BlockStore().Stage(stagingArea, blockHash, block)
err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err)
}
})
err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash)
if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) {
t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+
" Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err)
}
}
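The two coinbase rules tested above (ErrFirstTxNotCoinbase and ErrMultipleCoinbases) can be enforced in one pass: index 0 must be a coinbase, and nothing else may be. A sketch, with a boolean per transaction standing in for kaspad's actual coinbase test:

package main

import (
	"errors"
	"fmt"
)

var (
	errFirstTxNotCoinbase = errors.New("first transaction is not a coinbase")
	errMultipleCoinbases  = errors.New("block contains more than one coinbase")
)

// checkCoinbasePlacement enforces: index 0 is a coinbase, all others are not.
func checkCoinbasePlacement(isCoinbase []bool) error {
	if len(isCoinbase) == 0 || !isCoinbase[0] {
		return errFirstTxNotCoinbase
	}
	for _, c := range isCoinbase[1:] {
		if c {
			return errMultipleCoinbases
		}
	}
	return nil
}

func main() {
	fmt.Println(checkCoinbasePlacement([]bool{true, false, true})) // block contains more than one coinbase
}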
func initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig *consensus.Config) *externalapi.DomainBlock {

View File

@@ -6,7 +6,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
@@ -63,7 +62,7 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea,
return err
}
if !hasReachabilityData {
blockLevel := pow.BlockLevel(header)
blockLevel := header.BlockLevel()
for i := 0; i <= blockLevel; i++ {
err = v.reachabilityManagers[i].AddBlock(stagingArea, blockHash)
if err != nil {
@@ -195,7 +194,7 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has
}
func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error {
expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents())
expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents())
if err != nil {
return err
}

View File

@@ -67,8 +67,8 @@ func TestValidateMedianTime(t *testing.T) {
blockTime := tip.Header.TimeInMilliseconds()
for i := 0; i < 100; i++ {
blockTime += 1000
for i := 0; i < 10; i++ {
blockTime += 100
_, tipHash = addBlock(blockTime, []*externalapi.DomainHash{tipHash}, nil)
}
@@ -163,16 +163,17 @@ func TestCheckParentsIncest(t *testing.T) {
func TestCheckMergeSizeLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K)
consensusConfig.MergeSetSizeLimit = 5
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsIncest")
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckMergeSizeLimit")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
chain1TipHash := consensusConfig.GenesisHash
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+2; i++ {
// We add a chain one block longer than chain2 below, to make this one the selected chain
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ {
chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
@@ -180,7 +181,9 @@ func TestCheckMergeSizeLimit(t *testing.T) {
}
chain2TipHash := consensusConfig.GenesisHash
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ {
// We add a merge set of size exactly MergeSetSizeLimit (to violate the limit),
// since selected parent is also counted
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ {
chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
@@ -193,3 +196,56 @@ func TestCheckMergeSizeLimit(t *testing.T) {
}
})
}
func TestVirtualSelectionViolatingMergeSizeLimit(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K)
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestVirtualSelectionViolatingMergeSizeLimit")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
chain1TipHash := consensusConfig.GenesisHash
// We add a chain longer than chain2 below, to make this one the selected chain
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ {
chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
chain2TipHash := consensusConfig.GenesisHash
// We add a merge set of size exactly MergeSetSizeLimit-1 (to still not violate the limit)
for i := uint64(0); i < consensusConfig.MergeSetSizeLimit-1; i++ {
chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
// We now add a single block over genesis which is expected to exceed the limit
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
stagingArea := model.NewStagingArea()
virtualSelectedParent, err := tc.GetVirtualSelectedParent()
if err != nil {
t.Fatalf("GetVirtualSelectedParent: %+v", err)
}
selectedParentAnticone, err := tc.DAGTraversalManager().AnticoneFromVirtualPOV(stagingArea, virtualSelectedParent)
if err != nil {
t.Fatalf("AnticoneFromVirtualPOV: %+v", err)
}
// Test if Virtual's mergeset is too large
// Note: the selected parent itself is also counted in the mergeset limit
if len(selectedParentAnticone)+1 > (int)(consensusConfig.MergeSetSizeLimit) {
t.Fatalf("Virtual's mergset size (%d) exeeds merge set limit (%d)",
len(selectedParentAnticone)+1, consensusConfig.MergeSetSizeLimit)
}
})
}
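The assertion at the end of this test captures the fix referenced in the commit log ("Added a simple condition to fix the mergeset limit bug"): the virtual's mergeset is the selected parent's anticone plus the selected parent itself, and that total must respect the limit. A reduced sketch of the invariant being asserted:

package main

import "fmt"

// virtualMergeSetWithinLimit reports whether the virtual's mergeset
// (selected parent anticone + the selected parent itself) obeys the limit.
func virtualMergeSetWithinLimit(anticoneSize int, mergeSetSizeLimit uint64) bool {
	return uint64(anticoneSize+1) <= mergeSetSizeLimit
}

func main() {
	fmt.Println(virtualMergeSetWithinLimit(19, 20)) // true: exactly at the limit
	fmt.Println(virtualMergeSetWithinLimit(20, 20)) // false: limit violated
}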

View File

@@ -1,6 +1,9 @@
package blockvalidator_test
import (
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"reflect"
"runtime"
"testing"
"github.com/kaspanet/kaspad/domain/consensus"
@@ -13,73 +16,74 @@ import (
"github.com/pkg/errors"
)
func TestCheckParentsLimit(t *testing.T) {
func TestBlockValidator_ValidateHeaderInIsolation(t *testing.T) {
tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){
CheckParentsLimit,
CheckBlockVersion,
CheckBlockTimestampInIsolation,
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsLimit")
tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestBlockValidator_ValidateHeaderInIsolation")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
}
defer teardown(false)
for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ {
_, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
}
tips, err := tc.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
_, _, err = tc.AddBlock(tips, nil, nil)
if !errors.Is(err, ruleerrors.ErrTooManyParents) {
t.Fatalf("Unexpected error: %+v", err)
for _, test := range tests {
testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
t.Run(testName, func(t *testing.T) {
test(t, tc, consensusConfig)
})
}
})
}
func TestCheckBlockVersion(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockVersion")
func CheckParentsLimit(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ {
_, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
t.Fatalf("AddBlock: %+v", err)
}
defer teardown(false)
}
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
tips, err := tc.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
block.Header = blockheader.NewImmutableBlockHeader(
constants.MaxBlockVersion+1,
block.Header.Parents(),
block.Header.HashMerkleRoot(),
block.Header.AcceptedIDMerkleRoot(),
block.Header.UTXOCommitment(),
block.Header.TimeInMilliseconds(),
block.Header.Bits(),
block.Header.Nonce(),
block.Header.DAAScore(),
block.Header.BlueScore(),
block.Header.BlueWork(),
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrBlockVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
})
_, _, err = tc.AddBlock(tips, nil, nil)
if !errors.Is(err, ruleerrors.ErrTooManyParents) {
t.Fatalf("Unexpected error: %+v", err)
}
}
func TestCheckBlockTimestampInIsolation(t *testing.T) {
func CheckBlockVersion(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) {
block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
block.Header = blockheader.NewImmutableBlockHeader(
constants.MaxBlockVersion+1,
block.Header.Parents(),
block.Header.HashMerkleRoot(),
block.Header.AcceptedIDMerkleRoot(),
block.Header.UTXOCommitment(),
block.Header.TimeInMilliseconds(),
block.Header.Bits(),
block.Header.Nonce(),
block.Header.DAAScore(),
block.Header.BlueScore(),
block.Header.BlueWork(),
block.Header.PruningPoint(),
)
_, err = tc.ValidateAndInsertBlock(block, true)
if !errors.Is(err, ruleerrors.ErrBlockVersionIsUnknown) {
t.Fatalf("Unexpected error: %+v", err)
}
}
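CheckBlockVersion above rebuilds the header with constants.MaxBlockVersion+1 to trip the rule; the check itself is a comparison of the header version against the known maximum. A sketch, where `maxBlockVersion` is a stand-in constant rather than kaspad's actual value:

package main

import (
	"errors"
	"fmt"
)

var errBlockVersionIsUnknown = errors.New("block version is unknown")

const maxBlockVersion uint16 = 1 // stand-in for constants.MaxBlockVersion

func checkBlockVersion(version uint16) error {
	if version > maxBlockVersion {
		return errBlockVersionIsUnknown
	}
	return nil
}

func main() {
	fmt.Println(checkBlockVersion(maxBlockVersion + 1)) // block version is unknown
}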
func CheckBlockTimestampInIsolation(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()

View File

@@ -37,6 +37,7 @@ type blockValidator struct {
finalityManager model.FinalityManager
blockParentBuilder model.BlockParentBuilder
pruningManager model.PruningManager
parentsManager model.ParentsManager
blockStore model.BlockStore
ghostdagDataStores []model.GHOSTDAGDataStore
@@ -72,6 +73,7 @@ func New(powMax *big.Int,
finalityManager model.FinalityManager,
blockParentBuilder model.BlockParentBuilder,
pruningManager model.PruningManager,
parentsManager model.ParentsManager,
pruningStore model.PruningStore,
blockStore model.BlockStore,
@@ -108,6 +110,7 @@ func New(powMax *big.Int,
finalityManager: finalityManager,
blockParentBuilder: blockParentBuilder,
pruningManager: pruningManager,
parentsManager: parentsManager,
pruningStore: pruningStore,
blockStore: blockStore,

View File

@@ -8,7 +8,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/virtual"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
)
@@ -50,9 +49,11 @@ func (v *blockValidator) ValidatePruningPointViolationAndProofOfWorkAndDifficult
}
}
err = v.checkProofOfWork(header)
if err != nil {
return err
if !blockHash.Equal(v.genesisHash) {
err = v.checkProofOfWork(header)
if err != nil {
return err
}
}
err = v.validateDifficulty(stagingArea, blockHash, isBlockWithTrustedData)
@@ -68,9 +69,9 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea,
header externalapi.BlockHeader,
isBlockWithTrustedData bool) error {
for level := 0; level <= pow.BlockLevel(header); level++ {
for level := 0; level <= header.BlockLevel(); level++ {
var parents []*externalapi.DomainHash
for _, parent := range header.ParentsAtLevel(level) {
for _, parent := range v.parentsManager.ParentsAtLevel(header, level) {
_, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false)
isNotFoundError := database.IsNotFoundError(err)
if !isNotFoundError && err != nil {
@@ -117,7 +118,7 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
return err
}
blockLevel := pow.BlockLevel(header)
blockLevel := header.BlockLevel()
for i := 1; i <= blockLevel; i++ {
err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash)
if err != nil {
@@ -149,7 +150,8 @@ func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea,
// difficulty is not performed.
func (v *blockValidator) checkProofOfWork(header externalapi.BlockHeader) error {
// The target difficulty must be larger than zero.
target := difficulty.CompactToBig(header.Bits())
state := pow.NewState(header.ToMutable())
target := &state.Target
if target.Sign() <= 0 {
return errors.Wrapf(ruleerrors.ErrNegativeTarget, "block target difficulty of %064x is too low",
target)
@@ -163,7 +165,7 @@ func (v *blockValidator) checkProofOfWork(header externalapi.BlockHeader) error
// The block pow must be valid unless the flag to avoid proof of work checks is set.
if !v.skipPoW {
valid := pow.CheckProofOfWorkWithTarget(header.ToMutable(), target)
valid := state.CheckProofOfWork()
if !valid {
return errors.Wrap(ruleerrors.ErrInvalidPoW, "block has invalid proof of work")
}
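The refactor above folds target derivation and the PoW comparison into a single pow.State. Conceptually the check is: derive a big-integer target from the header's compact bits, reject non-positive targets, then require the block hash, read as an integer, to be at or below the target. A self-contained sketch of that comparison, using plain SHA-256 as a stand-in for kaspad's actual PoW hash:

package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
)

// checkProofOfWork returns whether hash(header), interpreted as a
// big-endian integer, is at or below the target. The hash function here
// is illustrative only; kaspad uses its own PoW hash, not plain SHA-256.
func checkProofOfWork(headerBytes []byte, target *big.Int) (bool, error) {
	if target.Sign() <= 0 {
		return false, fmt.Errorf("target %064x is not positive", target)
	}
	h := sha256.Sum256(headerBytes)
	hashInt := new(big.Int).SetBytes(h[:])
	return hashInt.Cmp(target) <= 0, nil
}

func main() {
	// A very easy target: any hash below 2^255 passes (~50% of nonces).
	target := new(big.Int).Lsh(big.NewInt(1), 255)
	ok, err := checkProofOfWork([]byte("header|nonce=42"), target)
	fmt.Println(ok, err)
}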

View File

@@ -101,23 +101,26 @@ func TestPOW(t *testing.T) {
t.Fatal(err)
}
random := rand.New(rand.NewSource(0))
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
// Difficulty is too high on mainnet to actually mine.
if consensusConfig.Name != "kaspa-mainnet" {
mining.SolveBlock(validBlock, random)
_, err = tc.ValidateAndInsertBlock(validBlock, true)
if err != nil {
t.Fatal(err)
}
}
})
}
// solveBlockWithWrongPOW increments the given block's nonce until it yields an invalid POW (for testing).
func solveBlockWithWrongPOW(block *externalapi.DomainBlock) *externalapi.DomainBlock {
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
initialNonce := uint64(0)
for i := initialNonce; i < math.MaxUint64; i++ {
headerForMining.SetNonce(i)
if !pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
header := block.Header.ToMutable()
state := pow.NewState(header)
for i := uint64(0); i < math.MaxUint64; i++ {
state.Nonce = i
if !state.CheckProofOfWork() {
header.SetNonce(state.Nonce)
block.Header = header.ToImmutable()
return block
}
}
@@ -296,7 +299,7 @@ func TestCheckPruningPointViolation(t *testing.T) {
func TestValidateDifficulty(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
factory := consensus.NewFactory()
mocDifficulty := &mocDifficultyManager{}
mocDifficulty := &mocDifficultyManager{genesisDaaScore: consensusConfig.GenesisBlock.Header.DAAScore()}
factory.SetTestDifficultyManager(func(_ model.DBReader, _ model.GHOSTDAGManager, _ model.GHOSTDAGDataStore,
_ model.BlockHeaderStore, daaBlocksStore model.DAABlocksStore, _ model.DAGTopologyManager,
_ model.DAGTraversalManager, _ *big.Int, _ int, _ bool, _ time.Duration,
@@ -342,6 +345,7 @@ type mocDifficultyManager struct {
testDifficulty uint32
testGenesisBits uint32
daaBlocksStore model.DAABlocksStore
genesisDaaScore uint64
}
// RequiredDifficulty returns the difficulty required for the test
@@ -352,7 +356,7 @@ func (dm *mocDifficultyManager) RequiredDifficulty(*model.StagingArea, *external
// StageDAADataAndReturnRequiredDifficulty returns the difficulty required for the test
func (dm *mocDifficultyManager) StageDAADataAndReturnRequiredDifficulty(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (uint32, error) {
// Populate daaBlocksStore with fake values
dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, 0)
dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, dm.genesisDaaScore)
dm.daaBlocksStore.StageBlockDAAAddedBlocks(stagingArea, blockHash, nil)
return dm.testDifficulty, nil

View File

@@ -49,7 +49,7 @@ func (c *coinbaseManager) isBlockRewardFixed(stagingArea *model.StagingArea, blo
blueWorkDifference := new(big.Int).Sub(highPruningPointHeader.BlueWork(), lowPruningPointHeader.BlueWork())
blueScoreDifference := new(big.Int).SetUint64(highPruningPointHeader.BlueScore() - lowPruningPointHeader.BlueScore())
estimatedAverageHashRate := new(big.Int).Div(blueWorkDifference, blueScoreDifference)
if estimatedAverageHashRate.Cmp(c.fixedSubsidySwitchHashRateDifference) >= 0 {
if estimatedAverageHashRate.Cmp(c.fixedSubsidySwitchHashRateThreshold) >= 0 {
return true, nil
}

View File

@@ -32,7 +32,7 @@ func TestBlockRewardSwitch(t *testing.T) {
// Set the hash rate difference such that the switch would trigger exactly
// on the `FixedSubsidySwitchPruningPointInterval + 1`th pruning point
workToAcceptGenesis := difficulty.CalcWork(consensusConfig.GenesisBlock.Header.Bits())
consensusConfig.FixedSubsidySwitchHashRateDifference = workToAcceptGenesis
consensusConfig.FixedSubsidySwitchHashRateThreshold = workToAcceptGenesis
// Set the min, max, and post-switch subsidies to values that would make it
// easy to tell whether the switch happened
@@ -86,36 +86,5 @@ func TestBlockRewardSwitch(t *testing.T) {
t.Fatalf("Subsidy has unexpected value. Want: %d, got: %d", consensusConfig.MinSubsidy, subsidy)
}
}
// Add another block. We expect it to be another pruning point
lastPruningPointHash, _, err := tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil)
if err != nil {
t.Fatalf("AddBlock: %+v", err)
}
// Make sure that another pruning point had been added
pruningPointHeaders, err = tc.PruningPointHeaders()
if err != nil {
t.Fatalf("PruningPointHeaders: %+v", pruningPointHeaders)
}
expectedPruningPointHeaderAmount = expectedPruningPointHeaderAmount + 1
if uint64(len(pruningPointHeaders)) != expectedPruningPointHeaderAmount {
t.Fatalf("Unexpected amount of pruning point headers. "+
"Want: %d, got: %d", expectedPruningPointHeaderAmount, len(pruningPointHeaders))
}
// Make sure that the last pruning point has a post-switch subsidy
lastPruningPoint, err := tc.GetBlock(lastPruningPointHash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}
lastPruningPointCoinbase := lastPruningPoint.Transactions[transactionhelper.CoinbaseTransactionIndex]
_, _, subsidy, err := tc.CoinbaseManager().ExtractCoinbaseDataBlueScoreAndSubsidy(lastPruningPointCoinbase)
if err != nil {
t.Fatalf("ExtractCoinbaseDataBlueScoreAndSubsidy: %+v", err)
}
if subsidy != consensusConfig.SubsidyGenesisReward {
t.Fatalf("Subsidy has unexpected value. Want: %d, got: %d", consensusConfig.SubsidyGenesisReward, subsidy)
}
})
}

View File

@@ -2,6 +2,8 @@ package coinbasemanager
import (
"encoding/binary"
"math/big"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
@@ -10,7 +12,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
"math/big"
)
type coinbaseManager struct {
@@ -22,7 +23,7 @@ type coinbaseManager struct {
coinbasePayloadScriptPublicKeyMaxLength uint8
genesisHash *externalapi.DomainHash
fixedSubsidySwitchPruningPointInterval uint64
fixedSubsidySwitchHashRateDifference *big.Int
fixedSubsidySwitchHashRateThreshold *big.Int
databaseContext model.DBReader
dagTraversalManager model.DAGTraversalManager
@@ -190,51 +191,7 @@ func (c *coinbaseManager) CalcBlockSubsidy(stagingArea *model.StagingArea,
return c.subsidyGenesisReward, nil
}
isBlockRewardFixed, err := c.isBlockRewardFixed(stagingArea, blockPruningPoint)
if err != nil {
return 0, err
}
if isBlockRewardFixed {
return c.subsidyGenesisReward, nil
}
averagePastSubsidy, err := c.calculateAveragePastSubsidy(stagingArea, blockHash)
if err != nil {
return 0, err
}
mergeSetSubsidySum, err := c.calculateMergeSetSubsidySum(stagingArea, blockHash)
if err != nil {
return 0, err
}
subsidyRandomVariable, err := c.calculateSubsidyRandomVariable(stagingArea, blockHash)
if err != nil {
return 0, err
}
pastSubsidy := new(big.Rat).Mul(averagePastSubsidy, c.subsidyPastRewardMultiplier)
mergeSetSubsidy := new(big.Rat).Mul(mergeSetSubsidySum, c.subsidyMergeSetRewardMultiplier)
// In order to avoid unsupported negative exponents in powInt64, flip
// the numerator and the denominator manually
subsidyRandom := new(big.Rat)
if subsidyRandomVariable >= 0 {
subsidyRandom = subsidyRandom.SetInt64(1 << subsidyRandomVariable)
} else {
subsidyRandom = subsidyRandom.SetFrac64(1, 1<<(-subsidyRandomVariable))
}
blockSubsidyBigRat := new(big.Rat).Add(mergeSetSubsidy, new(big.Rat).Mul(pastSubsidy, subsidyRandom))
blockSubsidyBigInt := new(big.Int).Div(blockSubsidyBigRat.Num(), blockSubsidyBigRat.Denom())
blockSubsidyUint64 := blockSubsidyBigInt.Uint64()
clampedBlockSubsidy := blockSubsidyUint64
if clampedBlockSubsidy < c.minSubsidy {
clampedBlockSubsidy = c.minSubsidy
} else if clampedBlockSubsidy > c.maxSubsidy {
clampedBlockSubsidy = c.maxSubsidy
}
return clampedBlockSubsidy, nil
return c.maxSubsidy, nil
}
func (c *coinbaseManager) calculateAveragePastSubsidy(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*big.Rat, error) {
@@ -364,7 +321,7 @@ func New(
coinbasePayloadScriptPublicKeyMaxLength uint8,
genesisHash *externalapi.DomainHash,
fixedSubsidySwitchPruningPointInterval uint64,
fixedSubsidySwitchHashRateDifference *big.Int,
fixedSubsidySwitchHashRateThreshold *big.Int,
dagTraversalManager model.DAGTraversalManager,
ghostdagDataStore model.GHOSTDAGDataStore,
@@ -385,7 +342,7 @@ func New(
coinbasePayloadScriptPublicKeyMaxLength: coinbasePayloadScriptPublicKeyMaxLength,
genesisHash: genesisHash,
fixedSubsidySwitchPruningPointInterval: fixedSubsidySwitchPruningPointInterval,
fixedSubsidySwitchHashRateDifference: fixedSubsidySwitchHashRateDifference,
fixedSubsidySwitchHashRateThreshold: fixedSubsidySwitchHashRateThreshold,
dagTraversalManager: dagTraversalManager,
ghostdagDataStore: ghostdagDataStore,

View File

@@ -75,7 +75,7 @@ func TestVirtualDiff(t *testing.T) {
blockB.Transactions[0].Outputs[0].Value,
blockB.Transactions[0].Outputs[0].ScriptPublicKey,
true,
2, //Expected virtual DAA score
consensusConfig.GenesisBlock.Header.DAAScore()+2, //Expected virtual DAA score
)) {
t.Fatalf("Unexpected entry %s", entry)
}

View File

@@ -2,7 +2,6 @@ package consensusstatemanager
import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
@@ -23,8 +22,16 @@ func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(stagingArea
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Block %s is the genesis. By definition, "+
"it has an empty UTXO diff, empty acceptance data, and a blank multiset", blockHash)
return utxo.NewUTXODiff(), externalapi.AcceptanceData{}, multiset.New(), nil
"it has a predefined UTXO diff, empty acceptance data, and a predefined multiset", blockHash)
multiset, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}
return utxoDiff, externalapi.AcceptanceData{}, multiset, nil
}
blockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, blockHash, false)

View File

@@ -4,7 +4,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/multiset"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)
@@ -19,8 +18,8 @@ func (csm *consensusStateManager) calculateMultiset(stagingArea *model.StagingAr
if blockHash.Equal(csm.genesisHash) {
log.Debugf("Selected parent is nil, which could only happen for the genesis. " +
"The genesis, by definition, has an empty multiset")
return multiset.New(), nil
"The genesis has a predefined multiset")
return csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash)
}
ms, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockGHOSTDAGData.SelectedParent())

View File

@@ -59,7 +59,8 @@ func (csm *consensusStateManager) pickVirtualParents(stagingArea *model.StagingA
selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent}
mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet
for len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
// First condition implies that no point in searching since limit was already reached
for mergeSetSize < csm.mergeSetSizeLimit && len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
candidate := candidates[0]
candidates = candidates[1:]

View File

@@ -35,7 +35,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
var selectedTip *externalapi.DomainHash
isCompletelyResolved := true
for _, tip := range tips {
log.Infof("Resolving tip %s", tip)
log.Debugf("Resolving tip %s", tip)
resolveStagingArea := model.NewStagingArea()
unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, tip)
if err != nil {
@@ -46,7 +46,7 @@ func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (boo
hasMoreUnverifiedThanMax := maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve
if hasMoreUnverifiedThanMax {
resolveTip = unverifiedBlocks[uint64(len(unverifiedBlocks))-maxBlocksToResolve]
log.Infof("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
log.Debugf("Has more than %d blocks to resolve. Changing the resolve tip to %s", maxBlocksToResolve, resolveTip)
}
blockStatus, reversalData, err := csm.resolveBlockStatus(resolveStagingArea, resolveTip, true)

View File

@@ -3,8 +3,6 @@ package consensusstatemanager
import (
"fmt"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/util/staging"
"github.com/kaspanet/kaspad/domain/consensus/model"
@@ -129,7 +127,11 @@ func (csm *consensusStateManager) selectedParentInfo(
if lastUnverifiedBlock.Equal(csm.genesisHash) {
log.Debugf("the most recent unverified block is the genesis block, "+
"which by definition has status: %s", externalapi.StatusUTXOValid)
return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxo.NewUTXODiff(), nil
utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, lastUnverifiedBlock)
if err != nil {
return nil, 0, nil, err
}
return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxoDiff, nil
}
lastUnverifiedBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, lastUnverifiedBlock, false)
if err != nil {

View File

@@ -285,7 +285,7 @@ func TestTransactionAcceptance(t *testing.T) {
if err != nil {
t.Fatalf("Error getting blockF: %+v", err)
}
updatedDAAScoreVirtualBlock := 26
updatedDAAScoreVirtualBlock := consensusConfig.GenesisBlock.Header.DAAScore() + 26
//We expect the second transaction in the "blue block" (blueChildOfRedBlock) to be accepted because the merge set is ordered topologically
//and the red block is ordered topologically before the "blue block" so the input is known in the UTXOSet.
expectedAcceptanceData := externalapi.AcceptanceData{

View File

@@ -22,78 +22,6 @@ func TestBlockWindow(t *testing.T) {
expectedWindow []string
}{
dagconfig.MainnetParams.Name: {
{
parents: []string{"A"},
id: "B",
expectedWindow: []string{},
},
{
parents: []string{"B"},
id: "C",
expectedWindow: []string{"B"},
},
{
parents: []string{"B"},
id: "D",
expectedWindow: []string{"B"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"A"},
id: "G",
expectedWindow: []string{},
},
{
parents: []string{"G"},
id: "H",
expectedWindow: []string{"G"},
},
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
},
},
dagconfig.TestnetParams.Name: {
{
parents: []string{"A"},
id: "B",
@@ -139,6 +67,7 @@ func TestBlockWindow(t *testing.T) {
id: "J",
expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"},
},
//
{
parents: []string{"J"},
id: "K",
@@ -165,6 +94,78 @@ func TestBlockWindow(t *testing.T) {
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
},
},
dagconfig.TestnetParams.Name: {
{
parents: []string{"A"},
id: "B",
expectedWindow: []string{},
},
{
parents: []string{"B"},
id: "C",
expectedWindow: []string{"B"},
},
{
parents: []string{"B"},
id: "D",
expectedWindow: []string{"B"},
},
{
parents: []string{"C", "D"},
id: "E",
expectedWindow: []string{"C", "D", "B"},
},
{
parents: []string{"C", "D"},
id: "F",
expectedWindow: []string{"C", "D", "B"},
},
{
parents: []string{"A"},
id: "G",
expectedWindow: []string{},
},
{
parents: []string{"G"},
id: "H",
expectedWindow: []string{"G"},
},
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
},
},
dagconfig.DevnetParams.Name: {
{
parents: []string{"A"},
@@ -182,12 +183,12 @@ func TestBlockWindow(t *testing.T) {
expectedWindow: []string{"B"},
},
{
parents: []string{"D", "C"},
parents: []string{"C", "D"},
id: "E",
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"D", "C"},
parents: []string{"C", "D"},
id: "F",
expectedWindow: []string{"D", "C", "B"},
},
@@ -204,37 +205,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "D", "C", "H", "G", "B"},
expectedWindow: []string{"F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "D", "C", "H", "G", "B"},
expectedWindow: []string{"I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "D", "C", "H", "G", "B"},
expectedWindow: []string{"J", "I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "G", "B"},
expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "G", "B"},
expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "G"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"},
},
},
dagconfig.SimnetParams.Name: {
@@ -256,12 +257,12 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"D", "C"},
id: "E",
expectedWindow: []string{"C", "D", "B"},
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"D", "C"},
id: "F",
expectedWindow: []string{"C", "D", "B"},
expectedWindow: []string{"D", "C", "B"},
},
{
parents: []string{"A"},
@@ -276,37 +277,37 @@ func TestBlockWindow(t *testing.T) {
{
parents: []string{"H", "F"},
id: "I",
expectedWindow: []string{"F", "C", "D", "H", "G", "B"},
expectedWindow: []string{"F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"I"},
id: "J",
expectedWindow: []string{"I", "F", "C", "D", "H", "G", "B"},
expectedWindow: []string{"I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"J"},
id: "K",
expectedWindow: []string{"J", "I", "F", "C", "D", "H", "G", "B"},
expectedWindow: []string{"J", "I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"K"},
id: "L",
expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "G", "B"},
expectedWindow: []string{"K", "J", "I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"L"},
id: "M",
expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "G", "B"},
expectedWindow: []string{"L", "K", "J", "I", "F", "H", "D", "C", "B", "G"},
},
{
parents: []string{"M"},
id: "N",
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "G"},
expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "D", "C", "B"},
},
{
parents: []string{"N"},
id: "O",
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"},
expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "D", "C"},
},
},
}

View File

@@ -105,10 +105,14 @@ func (dm *difficultyManager) requiredDifficultyFromTargetsWindow(targetsWindow b
return dm.genesisBits, nil
}
// In the past this was < 2 as the comment explains; we changed it to be under the window size to
// make the hashrate (which is ~1.5GH/s) constant in the first 2641 blocks, so that we won't have a lot of tips
// We need at least 2 blocks to get a timestamp interval
// We could instead clamp the timestamp difference to `targetTimePerBlock`,
// but then everything will cancel out and we'll get the target from the last block, which will be the same as genesis.
if len(targetsWindow) < 2 {
// We add 64 as a safety margin
if len(targetsWindow) < 2 || len(targetsWindow) < dm.difficultyAdjustmentWindowSize {
return dm.genesisBits, nil
}
windowMinTimestamp, windowMaxTimeStamp, windowsMinIndex, _ := targetsWindow.minMaxTimestamps()
@@ -157,7 +161,11 @@ func (dm *difficultyManager) calculateDaaScoreAndAddedBlocks(stagingArea *model.
isBlockWithTrustedData bool) (uint64, []*externalapi.DomainHash, error) {
if blockHash.Equal(dm.genesisHash) {
return 0, nil, nil
genesisHeader, err := dm.headerStore.BlockHeader(dm.databaseContext, stagingArea, dm.genesisHash)
if err != nil {
return 0, nil, err
}
return genesisHeader.DAAScore(), nil, nil
}
ghostdagData, err := dm.ghostdagStore.Get(dm.databaseContext, stagingArea, blockHash, false)

View File

@@ -31,7 +31,7 @@ func TestDifficulty(t *testing.T) {
}
consensusConfig.K = 1
consensusConfig.DifficultyAdjustmentWindowSize = 265
consensusConfig.DifficultyAdjustmentWindowSize = 140
factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDifficulty")
@@ -108,7 +108,7 @@ func TestDifficulty(t *testing.T) {
"window size, the difficulty should be the same as genesis'")
}
}
for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+100; i++ {
for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+10; i++ {
tip, tipHash = addBlock(0, tipHash)
if tip.Header.Bits() != consensusConfig.GenesisBlock.Header.Bits() {
t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change")
@@ -130,9 +130,9 @@ func TestDifficulty(t *testing.T) {
var expectedBits uint32
switch consensusConfig.Name {
case dagconfig.TestnetParams.Name, dagconfig.DevnetParams.Name:
expectedBits = uint32(0x1e7f83df)
expectedBits = uint32(0x1e7f1441)
case dagconfig.MainnetParams.Name:
expectedBits = uint32(0x207f83df)
expectedBits = uint32(0x1d02c50f)
}
if tip.Header.Bits() != expectedBits {
@@ -231,7 +231,7 @@ func TestDifficulty(t *testing.T) {
func TestDAAScore(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
consensusConfig.DifficultyAdjustmentWindowSize = 265
consensusConfig.DifficultyAdjustmentWindowSize = 86
stagingArea := model.NewStagingArea()
@@ -263,9 +263,9 @@ func TestDAAScore(t *testing.T) {
t.Fatalf("DAAScore: %+v", err)
}
blockBlueScore3ExpectedDAAScore := uint64(2)
blockBlueScore3ExpectedDAAScore := uint64(2) + consensusConfig.GenesisBlock.Header.DAAScore()
if blockBlueScore3DAAScore != blockBlueScore3ExpectedDAAScore {
t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3ExpectedDAAScore)
t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3DAAScore)
}
tipDAAScore := blockBlueScore3ExpectedDAAScore

View File

@@ -0,0 +1,41 @@
package parentssanager
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
)
type parentsManager struct {
genesisHash *externalapi.DomainHash
}
// New instantiates a new ParentsManager
func New(genesisHash *externalapi.DomainHash) model.ParentsManager {
return &parentsManager{
genesisHash: genesisHash,
}
}
func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents {
var parentsAtLevel externalapi.BlockLevelParents
if len(blockHeader.Parents()) > level {
parentsAtLevel = blockHeader.Parents()[level]
}
if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 {
return externalapi.BlockLevelParents{pm.genesisHash}
}
return parentsAtLevel
}
func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents {
numParents := constants.MaxBlockLevel + 1
parents := make([]externalapi.BlockLevelParents, numParents)
for i := 0; i < numParents; i++ {
parents[i] = pm.ParentsAtLevel(blockHeader, i)
}
return parents
}
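The new parentsManager above fills a gap left by the header-size hard fork: headers no longer carry every level's parents, so any level without stored parents falls back to the genesis hash, provided the block has direct parents at all. A reduced sketch of that fallback rule, with string hashes standing in for DomainHash:

package main

import "fmt"

// parentsAtLevel mirrors the fallback: use the stored level if present
// and non-empty, otherwise default to [genesis] for any block that has
// direct parents.
func parentsAtLevel(parents [][]string, level int, genesis string) []string {
	var atLevel []string
	if len(parents) > level {
		atLevel = parents[level]
	}
	if len(atLevel) == 0 && len(parents) > 0 && len(parents[0]) > 0 {
		return []string{genesis}
	}
	return atLevel
}

func main() {
	parents := [][]string{{"p1", "p2"}, {"q1"}} // only levels 0 and 1 stored
	fmt.Println(parentsAtLevel(parents, 1, "genesis")) // [q1]
	fmt.Println(parentsAtLevel(parents, 5, "genesis")) // [genesis]: level not stored
}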

View File

@@ -2,7 +2,6 @@ package pruningmanager_test
import (
"encoding/json"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"os"
"path/filepath"
@@ -39,12 +38,14 @@ func TestPruning(t *testing.T) {
"dag-for-test-pruning.json": {
dagconfig.MainnetParams.Name: "503",
dagconfig.TestnetParams.Name: "502",
dagconfig.DevnetParams.Name: "503",
dagconfig.SimnetParams.Name: "503",
dagconfig.DevnetParams.Name: "502",
dagconfig.SimnetParams.Name: "502",
},
}
testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
// Improve the performance of the test a little
consensusConfig.DisableDifficultyAdjustment = true
err := filepath.Walk("./testdata", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -72,6 +73,7 @@ func TestPruning(t *testing.T) {
consensusConfig.DifficultyAdjustmentWindowSize = 400
factory := consensus.NewFactory()
factory.SetTestLevelDBCacheSize(128)
tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPruning")
if err != nil {
t.Fatalf("Error setting up consensus: %+v", err)
@@ -140,12 +142,11 @@ func TestPruning(t *testing.T) {
// We expect blocks that are within the difficulty adjustment window size of
// the pruning point and its anticone to not get pruned
unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticoneWithTrustedData()
pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
if err != nil {
t.Fatalf("pruningPointAndItsAnticone: %+v", err)
}
for _, block := range pruningPointAndItsAnticone {
blockHash := consensushashing.BlockHash(block.Block)
for _, blockHash := range pruningPointAndItsAnticone {
unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
if err != nil {

View File

@@ -11,6 +11,7 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/staging"
"github.com/pkg/errors"
"sort"
)
// pruningManager resolves and manages the current pruning point
@@ -674,7 +675,19 @@ func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPoints(st
onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.calculateDiffBetweenPreviousAndCurrentPruningPoints")
defer onEnd()
if currentPruningHash.Equal(pm.genesisHash) {
return utxo.NewUTXODiff(), nil
iter, err := pm.consensusStateManager.RestorePastUTXOSetIterator(stagingArea, currentPruningHash)
if err != nil {
return nil, err
}
set := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
for ok := iter.First(); ok; ok = iter.Next() {
outpoint, entry, err := iter.Get()
if err != nil {
return nil, err
}
set[*outpoint] = entry
}
return utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(set), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)))
}
pruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea)
@@ -906,8 +919,8 @@ func (pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pr
return nil
}
func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*externalapi.BlockWithTrustedData, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticoneWithTrustedData")
func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone")
defer onEnd()
stagingArea := model.NewStagingArea()
@@ -921,27 +934,32 @@ func (pm *pruningManager) PruningPointAndItsAnticoneWithTrustedData() ([]*extern
return nil, err
}
blocks := make([]*externalapi.BlockWithTrustedData, 0, len(pruningPointAnticone)+1)
pruningPointWithTrustedData, err := pm.blockWithTrustedData(stagingArea, pruningPoint)
if err != nil {
return nil, err
}
blocks = append(blocks, pruningPointWithTrustedData)
for _, blockHash := range pruningPointAnticone {
blockWithTrustedData, err := pm.blockWithTrustedData(stagingArea, blockHash)
// Sorting the blocks in topological order
var sortErr error
sort.Slice(pruningPointAnticone, func(i, j int) bool {
headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i])
if err != nil {
return nil, err
sortErr = err
return false
}
blocks = append(blocks, blockWithTrustedData)
headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j])
if err != nil {
sortErr = err
return false
}
return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0
})
if sortErr != nil {
return nil, sortErr
}
return blocks, nil
// The pruning point should always come first
return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
}
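The sort above orders the pruning point's anticone topologically by blue work, capturing the first store-lookup error through a closure, since sort.Slice's less function cannot return one. The same pattern in isolation, with a map standing in for the header store:

package main

import (
	"fmt"
	"math/big"
	"sort"
)

func main() {
	hashes := []string{"c", "a", "b"}
	blueWork := map[string]*big.Int{ // stand-in for a blockHeaderStore lookup
		"a": big.NewInt(10), "b": big.NewInt(20), "c": big.NewInt(30),
	}

	// Capture lookup failures in sortErr, since the less func can't error.
	var sortErr error
	sort.Slice(hashes, func(i, j int) bool {
		wi, ok := blueWork[hashes[i]]
		if !ok {
			sortErr = fmt.Errorf("missing header for %s", hashes[i])
			return false
		}
		wj, ok := blueWork[hashes[j]]
		if !ok {
			sortErr = fmt.Errorf("missing header for %s", hashes[j])
			return false
		}
		return wi.Cmp(wj) < 0
	})
	fmt.Println(hashes, sortErr) // [a b c] <nil>
}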
func (pm *pruningManager) blockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
func (pm *pruningManager) BlockWithTrustedData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockWithTrustedData, error) {
block, err := pm.blocksStore.Block(pm.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, err

View File

@@ -0,0 +1,5 @@
package pruningproofmanager
import "github.com/kaspanet/kaspad/infrastructure/logger"
var log = logger.RegisterSubSystem("PPMN")

View File

@@ -15,8 +15,8 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"math/big"
)
@@ -28,6 +28,7 @@ type pruningProofManager struct {
ghostdagManagers []model.GHOSTDAGManager
reachabilityManagers []model.ReachabilityManager
dagTraversalManagers []model.DAGTraversalManager
parentsManager model.ParentsManager
ghostdagDataStores []model.GHOSTDAGDataStore
pruningStore model.PruningStore
@@ -39,6 +40,9 @@ type pruningProofManager struct {
genesisHash *externalapi.DomainHash
k externalapi.KType
pruningProofM uint64
cachedPruningPoint *externalapi.DomainHash
cachedProof *externalapi.PruningPointProof
}
// New instantiates a new PruningManager
@@ -49,6 +53,7 @@ func New(
ghostdagManagers []model.GHOSTDAGManager,
reachabilityManagers []model.ReachabilityManager,
dagTraversalManagers []model.DAGTraversalManager,
parentsManager model.ParentsManager,
ghostdagDataStores []model.GHOSTDAGDataStore,
pruningStore model.PruningStore,
@@ -68,6 +73,7 @@ func New(
ghostdagManagers: ghostdagManagers,
reachabilityManagers: reachabilityManagers,
dagTraversalManagers: dagTraversalManagers,
parentsManager: parentsManager,
ghostdagDataStores: ghostdagDataStores,
pruningStore: pruningStore,
@@ -83,6 +89,33 @@ func New(
}
func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof")
defer onEnd()
pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
if err != nil {
return nil, err
}
if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) {
return ppm.cachedProof, nil
}
proof, err := ppm.buildPruningPointProof(stagingArea)
if err != nil {
return nil, err
}
ppm.cachedProof = proof
ppm.cachedPruningPoint = pruningPoint
return proof, nil
}
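The wrapper above is a single-entry memoization keyed on the current pruning point: the expensive proof build runs once per pruning point and is implicitly invalidated when the point advances. The shape of the pattern, reduced to strings:

package main

import "fmt"

type proofCache struct {
	cachedKey   string
	cachedProof string
}

// get returns the cached proof while the key is unchanged,
// otherwise rebuilds and re-keys the cache.
func (c *proofCache) get(key string, build func() string) string {
	if c.cachedKey == key {
		return c.cachedProof
	}
	c.cachedProof = build()
	c.cachedKey = key
	return c.cachedProof
}

func main() {
	c := &proofCache{}
	builds := 0
	build := func() string { builds++; return "proof" }
	c.get("pp1", build)
	c.get("pp1", build) // served from cache
	c.get("pp2", build) // pruning point moved: rebuild
	fmt.Println(builds) // 2
}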
func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof")
defer onEnd()
pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea)
if err != nil {
return nil, err
@@ -97,17 +130,33 @@ func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.Stagin
return nil, err
}
maxLevel := len(pruningPointHeader.Parents()) - 1
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
headersByLevel := make(map[int][]externalapi.BlockHeader)
selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1)
pruningPointLevel := pow.BlockLevel(pruningPointHeader)
pruningPointLevel := pruningPointHeader.BlockLevel()
for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- {
var selectedTip *externalapi.DomainHash
if blockLevel <= pruningPointLevel {
selectedTip = pruningPoint
} else {
blockLevelParents := pruningPointHeader.ParentsAtLevel(blockLevel)
selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, []*externalapi.DomainHash(blockLevelParents)...)
blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel)
selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents))
// In a pruned node, some pruning point parents might be missing, but we're guaranteed that its
// selected parent is not missing.
for _, parent := range blockLevelParents {
_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue
}
if err != nil {
return nil, err
}
selectedTipCandidates = append(selectedTipCandidates, parent)
}
selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...)
if err != nil {
return nil, err
}
@@ -248,6 +297,9 @@ func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, gho
}
func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof")
defer onEnd()
stagingArea := model.NewStagingArea()
if len(pruningPointProof.Headers) == 0 {
@@ -257,8 +309,8 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
level0Headers := pruningPointProof.Headers[0]
pruningPointHeader := level0Headers[len(level0Headers)-1]
pruningPoint := consensushashing.HeaderHash(pruningPointHeader)
pruningPointBlockLevel := pow.BlockLevel(pruningPointHeader)
maxLevel := len(pruningPointHeader.Parents()) - 1
pruningPointBlockLevel := pruningPointHeader.BlockLevel()
maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1
if maxLevel >= len(pruningPointProof.Headers) {
return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+
"has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1)
@@ -300,15 +352,15 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if pow.BlockLevel(header) < blockLevel {
if header.BlockLevel() < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
}
blockHeaderStore.Stage(stagingArea, blockHash, header)
var parents []*externalapi.DomainHash
for _, parent := range header.ParentsAtLevel(blockLevel) {
for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
_, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue
@@ -377,7 +429,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
}
}
if !selectedTip.Equal(pruningPoint) && !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+
"level %d is not a parent of the pruning point", selectedTip, blockLevel)
}
@@ -395,7 +447,7 @@ func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *ext
return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+
"proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel)
}
} else if !pruningPointHeader.ParentsAtLevel(blockLevel).Contains(selectedTip) {
} else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) {
return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+
"proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+
"level", selectedTip, blockLevel)
@@ -554,19 +606,22 @@ func (ppm *pruningProofManager) dagProcesses(
}
func (ppm *pruningProofManager) ApplyPruningPointProof(stagingArea *model.StagingArea, pruningPointProof *externalapi.PruningPointProof) error {
onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof")
defer onEnd()
for blockLevel, headers := range pruningPointProof.Headers {
var selectedTip *externalapi.DomainHash
for i, header := range headers {
blockHash := consensushashing.HeaderHash(header)
if pow.BlockLevel(header) < blockLevel {
if header.BlockLevel() < blockLevel {
return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+
"expected to be at least %d", blockHash, pow.BlockLevel(header), blockLevel)
"expected to be at least %d", blockHash, header.BlockLevel(), blockLevel)
}
ppm.blockHeaderStore.Stage(stagingArea, blockHash, header)
var parents []*externalapi.DomainHash
for _, parent := range header.ParentsAtLevel(blockLevel) {
for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) {
_, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false)
if database.IsNotFoundError(err) {
continue

View File

@@ -49,7 +49,7 @@ func TestCheckLockTimeVerifyConditionedByDAAScore(t *testing.T) {
}
fees := uint64(1)
// Create a CLTV script:
targetDAAScore := uint64(30)
targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30)
redeemScriptCLTV, err := createScriptCLTV(targetDAAScore)
if err != nil {
t.Fatalf("Failed to create a script using createScriptCLTV: %v", err)
@@ -156,7 +156,7 @@ func TestCheckLockTimeVerifyConditionedByDAAScoreWithWrongLockTime(t *testing.T)
}
fees := uint64(1)
// Create a CLTV script:
targetDAAScore := uint64(30)
targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30)
redeemScriptCLTV, err := createScriptCLTV(targetDAAScore)
if err != nil {
t.Fatalf("Failed to create a script using createScriptCLTV: %v", err)

View File

@@ -2,6 +2,7 @@ package blockheader
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"math/big"
)
@@ -18,6 +19,9 @@ type blockHeader struct {
blueScore uint64
blueWork *big.Int
pruningPoint *externalapi.DomainHash
isBlockLevelCached bool
blockLevel int
}
func (bh *blockHeader) BlueScore() uint64 {
@@ -41,10 +45,12 @@ func (bh *blockHeader) ToImmutable() externalapi.BlockHeader {
}
func (bh *blockHeader) SetNonce(nonce uint64) {
bh.isBlockLevelCached = false
bh.nonce = nonce
}
func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) {
bh.isBlockLevelCached = false
bh.timeInMilliseconds = timeInMilliseconds
}
@@ -56,16 +62,12 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents {
return bh.parents
}
func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents {
if len(bh.parents) <= level {
func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
if len(bh.parents) == 0 {
return externalapi.BlockLevelParents{}
}
return bh.parents[level]
}
func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents {
return bh.ParentsAtLevel(0)
return bh.parents[0]
}
func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash {
@@ -177,6 +179,15 @@ func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader {
return bh.clone()
}
func (bh *blockHeader) BlockLevel() int {
if !bh.isBlockLevelCached {
bh.blockLevel = pow.BlockLevel(bh)
bh.isBlockLevelCached = true
}
return bh.blockLevel
}
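A sketch of the caching contract this introduces: the level is derived from the header's pow hash, so every setter that mutates a hashed field must reset the cache. The construction helper below is assumed, purely for illustration:
header := newTestHeader()           // assumed helper returning an externalapi.MutableBlockHeader
levelBefore := header.BlockLevel()  // computed via pow.BlockLevel once, then cached
header.SetNonce(header.Nonce() + 1) // flips isBlockLevelCached back to false
levelAfter := header.BlockLevel()   // recomputed for the new nonce
_ = levelBefore == levelAfter       // may differ: the level depends on the pow hash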
// NewImmutableBlockHeader returns a new immutable header
func NewImmutableBlockHeader(
version uint16,

View File

@@ -36,6 +36,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
8,
big.NewInt(9),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}),
false,
0,
},
expectedResult: false,
},
@@ -55,6 +57,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
headersToCompareTo: []headerToCompare{
{
@@ -75,6 +79,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: true,
},
@@ -92,6 +98,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -111,6 +119,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -128,6 +138,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -145,6 +157,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -162,6 +176,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -179,6 +195,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -196,6 +214,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -213,6 +233,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -230,6 +252,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -247,6 +271,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -264,6 +290,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
100,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -281,6 +309,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(100),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}),
false,
0,
},
expectedResult: false,
},
@@ -298,6 +328,8 @@ func TestDomainBlockHeader_Equal(t *testing.T) {
9,
big.NewInt(10),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}),
false,
0,
},
expectedResult: false,
},

View File

@@ -37,5 +37,7 @@ const (
LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC
// MaxBlockLevel is the maximum possible block level.
MaxBlockLevel = 255
// This is technically 255, but we clamped it at 256 minus the block level of the mainnet genesis.
// This means that any block whose level is lower than or equal to the genesis level will be level 0.
MaxBlockLevel = 225
)

View File

@@ -4,6 +4,7 @@ import (
"crypto/sha256"
"github.com/pkg/errors"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/sha3"
)
const (
@@ -66,21 +67,15 @@ func NewBlockHashWriter() HashWriter {
}
// NewPoWHashWriter returns a new ShakeHashWriter used for the PoW function
func NewPoWHashWriter() HashWriter {
blake, err := blake2b.New256([]byte(proofOfWorkDomain))
if err != nil {
panic(errors.Wrapf(err, "this should never happen. %s is less than 64 bytes", proofOfWorkDomain))
}
return HashWriter{blake}
func NewPoWHashWriter() ShakeHashWriter {
shake256 := sha3.NewCShake256(nil, []byte(proofOfWorkDomain))
return ShakeHashWriter{shake256}
}
// NewHeavyHashWriter returns a new ShakeHashWriter used for the HeavyHash function
func NewHeavyHashWriter() HashWriter {
blake, err := blake2b.New256([]byte(heavyHashDomain))
if err != nil {
panic(errors.Wrapf(err, "this should never happen. %s is less than 64 bytes", heavyHashDomain))
}
return HashWriter{blake}
func NewHeavyHashWriter() ShakeHashWriter {
shake256 := sha3.NewCShake256(nil, []byte(heavyHashDomain))
return ShakeHashWriter{shake256}
}
// NewMerkleBranchHashWriter returns a new HashWriter used for a Merkle tree branch

View File

@@ -3,6 +3,7 @@ package hashes
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"hash"
)
@@ -30,3 +31,29 @@ func (h HashWriter) Finalize() *externalapi.DomainHash {
copy(sum[:], h.Sum(sum[:0]))
return externalapi.NewDomainHashFromByteArray(&sum)
}
// ShakeHashWriter is exactly the same as HashWriter but for CShake256
type ShakeHashWriter struct {
sha3.ShakeHash
}
// InfallibleWrite is just like Write but doesn't return anything
func (h *ShakeHashWriter) InfallibleWrite(p []byte) {
// This write can never return an error; this is part of the hash.Hash interface contract.
_, err := h.Write(p)
if err != nil {
panic(errors.Wrap(err, "this should never happen. sha3.ShakeHash interface promises to not return errors."))
}
}
// Finalize returns the resulting hash
func (h *ShakeHashWriter) Finalize() *externalapi.DomainHash {
var sum [externalapi.DomainHashSize]byte
// Reading directly into a DomainHash-sized buffer should prevent an extra output allocation; we still copy because we don't want to rely on that.
_, err := h.Read(sum[:])
if err != nil {
panic(errors.Wrap(err, "this should never happen. sha3.ShakeHash interface promises to not return errors."))
}
h.ShakeHash = nil // prevent a second read, which would return a different hash
return externalapi.NewDomainHashFromByteArray(&sum)
}
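For context, cSHAKE256's customization string gives cheap domain separation: the same input hashed under two domains yields unrelated digests, which is exactly what the PoW and HeavyHash writers above rely on. A self-contained sketch (the domain strings mirror the constants above but are illustrative here):
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func domainDigest(domain, input []byte) [32]byte {
	hasher := sha3.NewCShake256(nil, domain) // nil function name, custom domain string
	hasher.Write(input)                      // absorb; Write never returns a non-nil error here
	var sum [32]byte
	hasher.Read(sum[:]) // squeeze 32 bytes of output
	return sum
}

func main() {
	input := []byte("same input")
	a := domainDigest([]byte("ProofOfWorkHash"), input)
	b := domainDigest([]byte("HeavyHash"), input)
	fmt.Println(a == b) // false: different domains, unrelated hashes
}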

View File

@@ -0,0 +1,50 @@
package hashes
import (
"math/rand"
"testing"
)
func BenchmarkNewBlockHashWriterSmall(b *testing.B) {
r := rand.New(rand.NewSource(0))
var someBytes [32]byte
r.Read(someBytes[:])
for i := 0; i < b.N; i++ {
hasher := NewBlockHashWriter()
hasher.InfallibleWrite(someBytes[:])
hasher.Finalize()
}
}
func BenchmarkNewBlockHashWriterBig(b *testing.B) {
r := rand.New(rand.NewSource(0))
var someBytes [1024]byte
r.Read(someBytes[:])
for i := 0; i < b.N; i++ {
hasher := NewBlockHashWriter()
hasher.InfallibleWrite(someBytes[:])
hasher.Finalize()
}
}
func BenchmarkNewHeavyHashWriterSmall(b *testing.B) {
r := rand.New(rand.NewSource(0))
var someBytes [32]byte
r.Read(someBytes[:])
for i := 0; i < b.N; i++ {
hasher := NewHeavyHashWriter()
hasher.InfallibleWrite(someBytes[:])
hasher.Finalize()
}
}
func BenchmarkNewHeavyHashWriterBig(b *testing.B) {
r := rand.New(rand.NewSource(0))
var someBytes [1024]byte
r.Read(someBytes[:])
for i := 0; i < b.N; i++ {
hasher := NewHeavyHashWriter()
hasher.InfallibleWrite(someBytes[:])
hasher.Finalize()
}
}
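These benchmarks run with the standard Go tooling, e.g. go test -bench HashWriter from inside the hashes package (path assumed); comparing the Small and Big variants separates the per-call writer setup cost from per-byte hashing throughput.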

View File

@@ -6,18 +6,17 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
)
// SolveBlock increments the given block's nonce until it matches the difficulty requirements in its bits field
func SolveBlock(block *externalapi.DomainBlock, rd *rand.Rand) {
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
for i := rd.Uint64(); i < math.MaxUint64; i++ {
headerForMining.SetNonce(i)
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
header := block.Header.ToMutable()
state := pow.NewState(header)
for state.Nonce = rd.Uint64(); state.Nonce < math.MaxUint64; state.Nonce++ {
if state.CheckProofOfWork() {
header.SetNonce(state.Nonce)
block.Header = header.ToImmutable()
return
}
}

View File

@@ -39,7 +39,7 @@ func (mat *matrix) computeRank() int {
var rowSelected [64]bool
for i := 0; i < 64; i++ {
var j int
for j := 0; j < 64; j++ {
for j = 0; j < 64; j++ {
if !rowSelected[j] && math.Abs(B[j][i]) > eps {
break
}
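The one-character fix above replaces "for j := 0" with "for j = 0": the := form declares a fresh j scoped to the loop, so the outer j that computeRank reads after the loop never sees where the loop broke. A minimal standalone reproduction of the shadowing bug (illustrative, not the original code):
package main

import "fmt"

func main() {
	var j int
	for j := 0; j < 64; j++ { // := shadows the outer j
		if j == 3 {
			break
		}
	}
	fmt.Println(j) // prints 0, not 3: the outer j was never written
}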

View File

@@ -3,13 +3,154 @@ package pow
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"math/rand"
"testing"
)
func BenchmarkMatrix_HeavyHash(b *testing.B) {
input := []byte("BenchmarkMatrix_HeavyHash")
writer := hashes.NewPoWHashWriter()
writer.InfallibleWrite(input)
hash := writer.Finalize()
matrix := generateMatrix(hash)
for i := 0; i < b.N; i++ {
hash = matrix.HeavyHash(hash)
}
}
func BenchmarkMatrix_Generate(b *testing.B) {
r := rand.New(rand.NewSource(0))
h := [32]byte{}
r.Read(h[:])
hash := externalapi.NewDomainHashFromByteArray(&h)
for i := 0; i < b.N; i++ {
generateMatrix(hash)
}
}
func BenchmarkMatrix_Rank(b *testing.B) {
r := rand.New(rand.NewSource(0))
h := [32]byte{}
r.Read(h[:])
hash := externalapi.NewDomainHashFromByteArray(&h)
matrix := generateMatrix(hash)
for i := 0; i < b.N; i++ {
matrix.computeRank()
}
}
func TestMatrix_Rank(t *testing.T) {
var mat matrix
if mat.computeRank() != 0 {
t.Fatalf("The zero matrix should have rank 0, instead got: %d", mat.computeRank())
}
r := rand.New(rand.NewSource(0))
h := [32]byte{}
r.Read(h[:])
hash := externalapi.NewDomainHashFromByteArray(&h)
mat = *generateMatrix(hash)
if mat.computeRank() != 64 {
t.Fatalf("generateMatrix() should always return full rank matrix, instead got: %d", mat.computeRank())
}
for i := range mat {
mat[0][i] = mat[1][i] * 2
}
if mat.computeRank() != 63 {
t.Fatalf("Made a linear depenency between 2 rows, rank should be 63, instead got: %d", mat.computeRank())
}
for i := range mat {
mat[33][i] = mat[32][i] * 3
}
if mat.computeRank() != 62 {
t.Fatalf("Made anoter linear depenency between 2 rows, rank should be 62, instead got: %d", mat.computeRank())
}
}
func TestGenerateMatrix(t *testing.T) {
hashBytes := [32]byte{42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
hash := externalapi.NewDomainHashFromByteArray(&hashBytes)
expectedMatrix := matrix{
{4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 14, 1, 2, 2, 14, 10, 4, 12, 4, 12, 10, 10, 10, 10, 10, 10},
{9, 11, 1, 11, 1, 11, 9, 11, 9, 11, 9, 3, 12, 13, 11, 5, 15, 15, 5, 0, 6, 8, 1, 8, 6, 11, 15, 5, 3, 6, 7, 3, 2, 15, 14, 3, 7, 11, 14, 7, 3, 6, 14, 12, 3, 9, 5, 1, 1, 0, 8, 4, 10, 15, 9, 10, 6, 13, 1, 1, 7, 4, 4, 6},
{2, 6, 0, 8, 11, 15, 4, 0, 5, 2, 7, 13, 15, 3, 11, 12, 6, 2, 1, 8, 13, 4, 11, 4, 10, 14, 13, 2, 6, 15, 10, 6, 6, 5, 6, 9, 3, 3, 3, 1, 9, 12, 12, 15, 6, 0, 1, 5, 7, 13, 14, 1, 10, 10, 5, 14, 4, 0, 12, 13, 2, 15, 8, 4},
{8, 6, 5, 1, 0, 6, 4, 8, 13, 0, 8, 12, 7, 2, 4, 3, 10, 5, 9, 3, 12, 13, 2, 4, 13, 14, 7, 7, 9, 12, 10, 8, 11, 6, 14, 3, 12, 8, 8, 0, 2, 10, 0, 9, 1, 9, 7, 8, 5, 2, 9, 13, 15, 6, 13, 10, 1, 9, 1, 10, 6, 2, 10, 9},
{4, 2, 6, 14, 4, 2, 5, 7, 15, 6, 0, 4, 11, 9, 12, 0, 3, 2, 0, 4, 10, 5, 12, 3, 3, 4, 10, 1, 0, 13, 3, 12, 15, 0, 7, 10, 2, 2, 15, 0, 2, 15, 8, 2, 15, 12, 10, 6, 6, 2, 13, 3, 8, 14, 3, 13, 10, 5, 4, 5, 1, 6, 5, 10},
{0, 3, 13, 12, 11, 4, 11, 13, 1, 12, 4, 11, 15, 14, 13, 4, 7, 1, 3, 0, 10, 3, 8, 8, 1, 2, 5, 14, 4, 5, 14, 1, 1, 3, 3, 1, 5, 15, 7, 5, 11, 8, 8, 12, 10, 5, 7, 9, 2, 10, 13, 11, 4, 2, 12, 15, 10, 6, 6, 0, 6, 6, 3, 12},
{9, 12, 3, 3, 5, 8, 12, 13, 7, 4, 5, 11, 4, 0, 7, 2, 2, 15, 12, 14, 12, 5, 4, 2, 8, 8, 8, 13, 6, 1, 1, 5, 0, 15, 12, 13, 8, 5, 0, 4, 13, 1, 6, 1, 12, 14, 1, 0, 13, 12, 10, 10, 1, 4, 13, 13, 8, 4, 15, 13, 6, 6, 14, 10},
{14, 15, 8, 0, 7, 2, 5, 10, 5, 3, 12, 0, 11, 3, 4, 2, 8, 11, 6, 14, 14, 3, 3, 12, 3, 7, 6, 2, 6, 12, 15, 1, 1, 13, 0, 6, 9, 9, 7, 7, 13, 4, 4, 2, 15, 5, 2, 15, 13, 13, 10, 6, 9, 15, 2, 9, 6, 10, 6, 14, 14, 3, 5, 11},
{6, 4, 7, 8, 11, 0, 13, 11, 0, 7, 0, 0, 13, 6, 3, 11, 15, 14, 10, 2, 7, 8, 13, 14, 8, 15, 10, 8, 14, 6, 10, 14, 3, 11, 5, 11, 13, 5, 3, 12, 3, 0, 2, 0, 6, 14, 4, 12, 4, 4, 8, 15, 7, 8, 12, 11, 3, 9, 5, 13, 10, 14, 13, 4},
{10, 0, 0, 15, 1, 4, 13, 3, 15, 10, 2, 5, 11, 2, 9, 14, 7, 3, 2, 8, 6, 15, 0, 12, 1, 4, 1, 9, 3, 0, 15, 8, 9, 13, 0, 7, 9, 10, 6, 14, 3, 7, 9, 7, 4, 0, 11, 8, 4, 6, 5, 8, 8, 0, 5, 14, 7, 12, 12, 2, 5, 6, 5, 6},
{12, 0, 0, 14, 8, 3, 0, 3, 13, 10, 5, 13, 5, 7, 2, 4, 13, 11, 3, 1, 11, 2, 14, 5, 10, 5, 5, 9, 12, 15, 12, 8, 1, 0, 11, 13, 8, 1, 1, 11, 10, 0, 11, 15, 13, 9, 12, 14, 5, 4, 5, 14, 2, 7, 2, 1, 4, 12, 11, 11, 9, 12, 11, 15},
{3, 15, 9, 8, 13, 12, 15, 7, 8, 7, 14, 6, 10, 3, 0, 5, 2, 2, 6, 6, 3, 2, 5, 12, 11, 2, 10, 11, 13, 3, 9, 7, 7, 6, 8, 15, 14, 14, 11, 11, 9, 7, 1, 3, 8, 5, 11, 11, 1, 2, 15, 8, 13, 8, 11, 4, 1, 5, 3, 12, 5, 3, 7, 7},
{13, 13, 2, 14, 4, 3, 15, 2, 0, 15, 1, 5, 4, 1, 5, 1, 4, 14, 5, 1, 11, 13, 15, 1, 3, 3, 5, 13, 14, 1, 0, 4, 6, 1, 15, 7, 7, 0, 15, 8, 15, 3, 14, 7, 7, 8, 12, 10, 2, 14, 9, 2, 11, 11, 7, 10, 4, 3, 12, 13, 4, 13, 0, 14},
{12, 14, 15, 15, 2, 0, 0, 13, 4, 6, 4, 2, 14, 11, 5, 6, 14, 8, 14, 7, 13, 15, 6, 15, 7, 9, 1, 0, 11, 9, 9, 0, 2, 12, 8, 8, 14, 11, 7, 5, 3, 0, 11, 12, 9, 2, 8, 9, 0, 0, 9, 8, 9, 8, 2, 14, 12, 2, 0, 14, 13, 8, 4, 10},
{7, 10, 1, 15, 12, 14, 7, 4, 7, 13, 4, 8, 13, 12, 1, 7, 10, 6, 5, 14, 14, 3, 14, 4, 11, 14, 6, 12, 15, 12, 15, 12, 4, 5, 9, 8, 7, 7, 3, 0, 5, 7, 3, 8, 4, 4, 7, 5, 6, 12, 13, 0, 12, 10, 2, 5, 14, 9, 6, 4, 13, 13, 14, 5},
{14, 5, 8, 3, 4, 15, 13, 14, 14, 10, 7, 14, 15, 2, 11, 14, 13, 13, 12, 10, 6, 9, 5, 5, 6, 13, 15, 13, 7, 0, 15, 11, 4, 12, 15, 7, 7, 4, 3, 11, 8, 14, 5, 10, 2, 4, 4, 12, 3, 6, 1, 9, 15, 1, 1, 13, 7, 5, 0, 14, 15, 7, 8, 6},
{1, 2, 10, 5, 2, 13, 1, 11, 15, 10, 4, 9, 9, 12, 14, 13, 3, 5, 0, 3, 7, 11, 10, 3, 12, 5, 10, 2, 13, 7, 1, 7, 13, 8, 2, 8, 3, 14, 10, 3, 5, 12, 0, 9, 3, 9, 11, 2, 10, 9, 0, 6, 4, 0, 1, 14, 11, 0, 8, 6, 1, 15, 3, 10},
{13, 9, 0, 5, 8, 7, 12, 15, 10, 10, 5, 1, 1, 7, 6, 1, 14, 5, 15, 2, 3, 5, 3, 5, 7, 3, 7, 7, 1, 4, 3, 14, 5, 0, 12, 0, 12, 10, 10, 6, 12, 6, 3, 5, 5, 11, 10, 1, 11, 3, 13, 3, 9, 11, 1, 7, 14, 14, 0, 8, 15, 5, 2, 7},
{8, 5, 11, 6, 15, 0, 1, 13, 1, 6, 7, 15, 4, 3, 14, 12, 9, 3, 11, 6, 4, 12, 1, 11, 6, 12, 5, 11, 1, 12, 2, 3, 1, 2, 11, 12, 0, 5, 11, 5, 3, 13, 11, 3, 11, 14, 10, 8, 3, 9, 4, 8, 13, 11, 9, 11, 2, 4, 12, 3, 0, 14, 7, 11},
{10, 11, 4, 10, 7, 8, 3, 14, 15, 8, 15, 6, 9, 8, 5, 6, 12, 1, 15, 6, 5, 5, 14, 13, 2, 12, 14, 6, 5, 5, 14, 9, 1, 10, 11, 14, 8, 6, 14, 11, 1, 15, 6, 11, 11, 8, 1, 2, 8, 5, 4, 15, 6, 8, 0, 8, 0, 11, 0, 1, 0, 7, 8, 15},
{0, 15, 5, 0, 11, 4, 4, 2, 0, 4, 8, 12, 2, 2, 0, 8, 1, 2, 6, 5, 6, 12, 3, 1, 12, 1, 6, 10, 2, 5, 0, 2, 0, 11, 8, 6, 13, 4, 14, 4, 15, 5, 8, 11, 9, 6, 2, 6, 9, 1, 4, 2, 14, 10, 4, 4, 1, 1, 11, 8, 6, 11, 11, 9},
{7, 3, 6, 5, 9, 1, 11, 0, 15, 13, 13, 13, 4, 14, 14, 12, 3, 7, 9, 3, 1, 6, 5, 9, 7, 6, 2, 11, 10, 4, 11, 14, 10, 13, 11, 8, 11, 8, 1, 15, 5, 0, 10, 5, 6, 0, 5, 15, 11, 6, 6, 4, 10, 11, 8, 12, 0, 10, 11, 11, 11, 1, 13, 6},
{7, 15, 0, 0, 11, 5, 7, 13, 3, 7, 3, 2, 5, 12, 6, 11, 14, 4, 9, 8, 9, 9, 13, 0, 15, 2, 13, 2, 15, 6, 15, 1, 1, 7, 4, 0, 10, 1, 8, 14, 0, 10, 12, 4, 5, 13, 9, 0, 7, 12, 13, 11, 11, 8, 8, 15, 2, 15, 4, 4, 9, 3, 10, 7},
{0, 9, 3, 5, 14, 6, 7, 14, 7, 2, 13, 7, 3, 15, 9, 15, 2, 8, 0, 4, 6, 0, 15, 6, 2, 1, 14, 8, 5, 8, 2, 4, 2, 11, 9, 2, 15, 13, 11, 12, 8, 15, 3, 13, 2, 2, 10, 13, 1, 8, 7, 15, 13, 6, 7, 7, 4, 3, 14, 7, 0, 9, 15, 11},
{8, 13, 7, 7, 8, 8, 7, 8, 1, 4, 10, 1, 12, 4, 14, 11, 7, 12, 15, 0, 10, 15, 9, 2, 14, 2, 14, 2, 4, 5, 13, 3, 2, 10, 0, 15, 7, 6, 8, 11, 7, 6, 10, 10, 4, 7, 10, 6, 6, 14, 10, 4, 14, 6, 12, 2, 8, 1, 9, 13, 3, 4, 3, 14},
{10, 10, 6, 3, 8, 5, 10, 7, 11, 10, 9, 4, 8, 14, 9, 10, 0, 9, 8, 14, 11, 15, 8, 13, 13, 7, 13, 13, 13, 9, 12, 11, 6, 3, 9, 6, 0, 0, 6, 6, 11, 6, 4, 8, 1, 5, 1, 7, 9, 6, 13, 4, 3, 8, 8, 11, 9, 10, 6, 11, 12, 13, 14, 14},
{14, 10, 0, 15, 14, 4, 3, 0, 12, 4, 0, 14, 11, 9, 0, 6, 4, 6, 0, 9, 8, 14, 4, 4, 6, 8, 2, 8, 10, 3, 8, 0, 1, 1, 15, 4, 2, 4, 13, 9, 9, 4, 0, 5, 5, 1, 2, 5, 11, 6, 2, 1, 7, 8, 10, 10, 1, 5, 8, 6, 7, 0, 4, 14},
{0, 15, 10, 11, 13, 12, 7, 7, 4, 0, 9, 5, 2, 8, 0, 10, 6, 6, 7, 5, 6, 7, 9, 0, 1, 4, 8, 14, 10, 3, 5, 5, 11, 5, 1, 10, 6, 10, 0, 14, 1, 15, 11, 12, 8, 2, 7, 8, 4, 0, 3, 11, 9, 15, 3, 5, 15, 15, 14, 15, 3, 4, 5, 14},
{5, 12, 12, 8, 0, 0, 14, 1, 4, 15, 3, 2, 2, 6, 1, 10, 7, 10, 14, 5, 14, 0, 8, 5, 9, 0, 12, 8, 9, 10, 3, 12, 3, 2, 0, 0, 12, 12, 7, 13, 2, 6, 4, 7, 10, 10, 14, 1, 11, 6, 10, 3, 12, 2, 1, 10, 7, 13, 10, 12, 14, 11, 14, 8},
{9, 5, 3, 12, 4, 3, 10, 14, 7, 5, 11, 12, 2, 13, 9, 8, 5, 2, 6, 2, 4, 9, 10, 10, 4, 3, 4, 0, 11, 1, 10, 9, 4, 10, 4, 5, 8, 11, 1, 7, 13, 7, 6, 6, 3, 12, 0, 0, 15, 6, 12, 12, 13, 7, 14, 14, 11, 15, 7, 14, 12, 6, 15, 2},
{15, 2, 0, 12, 15, 14, 8, 14, 7, 14, 0, 3, 3, 11, 12, 2, 3, 14, 13, 5, 12, 9, 6, 11, 7, 4, 5, 1, 7, 12, 0, 11, 1, 5, 6, 6, 8, 6, 12, 2, 12, 3, 10, 3, 4, 10, 3, 3, 3, 10, 10, 14, 3, 13, 15, 0, 7, 6, 15, 6, 13, 7, 4, 11},
{11, 15, 5, 14, 0, 1, 1, 14, 2, 3, 15, 14, 4, 3, 11, 1, 6, 6, 0, 12, 3, 5, 15, 6, 3, 11, 13, 11, 7, 7, 8, 11, 5, 9, 10, 10, 9, 14, 7, 1, 7, 2, 8, 6, 6, 5, 1, 9, 6, 5, 8, 14, 2, 14, 2, 9, 3, 3, 4, 15, 13, 5, 2, 7},
{7, 8, 13, 9, 15, 8, 11, 7, 1, 9, 15, 12, 6, 9, 3, 1, 10, 10, 11, 0, 0, 8, 14, 5, 11, 12, 14, 4, 3, 9, 12, 9, 14, 0, 0, 9, 12, 4, 1, 13, 3, 6, 3, 4, 13, 10, 2, 9, 3, 7, 7, 10, 7, 10, 10, 3, 5, 15, 8, 9, 11, 7, 1, 14},
{5, 5, 9, 1, 15, 3, 3, 11, 6, 11, 13, 13, 4, 12, 7, 12, 4, 8, 14, 13, 7, 12, 13, 8, 10, 2, 1, 12, 11, 7, 0, 8, 10, 9, 15, 1, 3, 9, 10, 0, 9, 1, 14, 1, 1, 9, 2, 2, 8, 9, 5, 6, 3, 2, 15, 9, 15, 6, 3, 11, 14, 4, 0, 4},
{9, 2, 10, 2, 0, 9, 6, 13, 13, 0, 13, 14, 3, 12, 1, 15, 9, 3, 12, 2, 5, 15, 6, 6, 15, 11, 7, 11, 0, 4, 0, 11, 10, 12, 7, 9, 3, 0, 2, 2, 13, 13, 9, 6, 9, 2, 6, 4, 3, 6, 5, 10, 10, 9, 7, 2, 4, 9, 13, 11, 2, 13, 6, 8},
{13, 15, 9, 8, 6, 2, 3, 2, 2, 12, 5, 3, 8, 6, 11, 6, 15, 7, 10, 3, 15, 8, 7, 5, 3, 8, 4, 2, 11, 1, 0, 4, 1, 1, 6, 1, 13, 6, 5, 1, 2, 6, 7, 10, 4, 3, 10, 6, 2, 0, 7, 13, 15, 1, 13, 0, 12, 10, 15, 6, 2, 4, 14, 3},
{5, 11, 14, 4, 0, 7, 12, 4, 4, 14, 12, 3, 4, 10, 7, 14, 6, 4, 14, 7, 0, 12, 5, 9, 15, 6, 15, 6, 3, 12, 0, 10, 11, 7, 1, 14, 13, 5, 1, 14, 5, 15, 12, 1, 9, 13, 9, 13, 14, 5, 10, 11, 12, 10, 15, 11, 9, 13, 2, 14, 9, 12, 2, 11},
{2, 12, 5, 7, 1, 5, 2, 11, 8, 4, 15, 6, 9, 14, 5, 1, 15, 4, 3, 1, 11, 4, 2, 1, 4, 5, 4, 4, 7, 3, 3, 12, 4, 3, 2, 15, 13, 1, 14, 15, 1, 4, 6, 11, 13, 15, 6, 12, 12, 13, 6, 8, 10, 0, 10, 12, 1, 10, 3, 2, 9, 8, 2, 8},
{10, 12, 12, 6, 8, 5, 4, 4, 5, 3, 6, 7, 15, 5, 10, 3, 8, 15, 14, 5, 6, 2, 14, 4, 1, 7, 1, 3, 12, 3, 12, 4, 10, 15, 6, 6, 0, 6, 6, 8, 6, 9, 5, 7, 5, 1, 9, 2, 4, 9, 0, 8, 1, 1, 14, 3, 7, 14, 8, 9, 0, 4, 11, 7},
{13, 11, 14, 7, 0, 4, 0, 10, 12, 11, 10, 8, 6, 12, 13, 15, 9, 2, 14, 9, 3, 0, 12, 14, 11, 15, 4, 7, 15, 14, 4, 8, 15, 12, 9, 14, 7, 7, 9, 13, 14, 14, 4, 9, 13, 8, 1, 13, 6, 3, 12, 7, 0, 15, 6, 15, 7, 2, 3, 0, 9, 5, 13, 0},
{3, 8, 12, 11, 5, 9, 9, 14, 8, 14, 14, 5, 9, 9, 12, 10, 3, 12, 13, 0, 0, 0, 6, 7, 12, 4, 2, 3, 8, 8, 9, 15, 11, 1, 12, 13, 10, 15, 11, 1, 2, 13, 10, 1, 7, 2, 7, 11, 8, 15, 7, 6, 4, 6, 5, 11, 11, 15, 2, 1, 11, 1, 1, 8},
{10, 7, 7, 1, 4, 13, 9, 10, 2, 2, 3, 7, 12, 8, 5, 5, 5, 5, 3, 1, 5, 6, 8, 2, 8, 11, 5, 0, 4, 12, 12, 6, 7, 9, 14, 10, 11, 8, 0, 9, 11, 4, 14, 7, 7, 8, 2, 15, 12, 7, 4, 4, 13, 2, 0, 3, 14, 0, 1, 5, 2, 15, 7, 11},
{3, 8, 10, 4, 1, 7, 3, 13, 5, 14, 0, 9, 3, 1, 0, 11, 2, 15, 4, 9, 6, 5, 14, 0, 2, 8, 1, 14, 7, 6, 1, 5, 5, 7, 2, 0, 5, 3, 4, 15, 13, 10, 9, 13, 13, 12, 5, 11, 11, 14, 13, 10, 8, 14, 0, 8, 1, 7, 2, 10, 12, 12, 1, 11},
{11, 14, 4, 13, 3, 11, 10, 6, 15, 2, 5, 10, 14, 4, 13, 3, 12, 7, 12, 10, 4, 0, 0, 1, 14, 6, 1, 2, 2, 12, 9, 2, 3, 11, 1, 4, 10, 4, 4, 7, 7, 12, 4, 3, 12, 11, 9, 3, 15, 13, 6, 13, 7, 11, 5, 12, 5, 13, 15, 12, 0, 13, 12, 9},
{8, 7, 2, 2, 5, 3, 10, 15, 10, 8, 1, 0, 4, 5, 7, 6, 15, 13, 2, 14, 6, 2, 9, 5, 9, 0, 5, 12, 8, 6, 4, 12, 6, 8, 14, 15, 7, 15, 11, 2, 2, 12, 7, 9, 7, 11, 15, 7, 0, 4, 5, 13, 7, 2, 5, 9, 0, 5, 7, 6, 7, 12, 4, 1},
{11, 4, 2, 13, 6, 10, 9, 4, 12, 9, 9, 6, 4, 2, 14, 14, 9, 5, 5, 15, 15, 9, 8, 11, 4, 2, 8, 11, 14, 3, 8, 10, 14, 9, 6, 6, 4, 7, 11, 2, 3, 7, 5, 1, 14, 2, 9, 4, 0, 1, 10, 7, 6, 7, 1, 3, 13, 7, 3, 2, 12, 3, 6, 6},
{11, 1, 14, 3, 14, 3, 9, 9, 0, 11, 14, 6, 14, 7, 14, 8, 4, 2, 5, 6, 13, 3, 4, 10, 8, 8, 10, 11, 5, 1, 15, 15, 7, 0, 4, 14, 15, 13, 14, 13, 3, 2, 1, 6, 0, 6, 6, 4, 15, 6, 0, 12, 5, 11, 1, 7, 3, 3, 13, 12, 12, 6, 3, 2},
{7, 2, 10, 14, 14, 13, 4, 14, 10, 6, 0, 2, 7, 7, 2, 5, 14, 1, 5, 14, 15, 1, 2, 9, 2, 13, 1, 3, 6, 1, 3, 13, 10, 6, 11, 13, 1, 7, 13, 15, 2, 11, 9, 6, 13, 7, 9, 2, 3, 13, 10, 10, 6, 2, 5, 9, 1, 3, 0, 3, 1, 5, 3, 12},
{11, 14, 4, 2, 10, 11, 15, 5, 9, 7, 8, 11, 10, 9, 5, 7, 14, 3, 12, 2, 7, 15, 12, 15, 4, 15, 12, 9, 2, 6, 6, 6, 8, 5, 0, 7, 14, 15, 14, 14, 3, 12, 7, 12, 2, 4, 1, 7, 1, 3, 4, 7, 1, 9, 11, 15, 15, 3, 7, 1, 10, 9, 14, 14},
{4, 13, 11, 1, 9, 6, 5, 1, 11, 6, 6, 8, 3, 9, 8, 15, 13, 12, 3, 13, 5, 9, 10, 5, 12, 1, 15, 14, 12, 1, 10, 11, 5, 7, 3, 12, 9, 12, 0, 2, 2, 3, 14, 4, 2, 13, 1, 15, 11, 8, 3, 13, 0, 10, 5, 4, 6, 0, 14, 8, 1, 0, 6, 15},
{15, 2, 0, 5, 2, 14, 9, 0, 10, 5, 12, 8, 5, 6, 0, 1, 9, 4, 4, 1, 4, 6, 14, 5, 3, 0, 2, 2, 14, 9, 7, 0, 2, 15, 12, 0, 10, 12, 9, 12, 15, 1, 9, 4, 15, 3, 0, 13, 0, 6, 5, 0, 2, 6, 11, 9, 13, 15, 6, 3, 5, 4, 0, 8},
{4, 14, 8, 14, 13, 4, 4, 10, 6, 12, 15, 11, 7, 2, 15, 6, 9, 9, 1, 11, 13, 2, 7, 10, 4, 4, 5, 12, 14, 15, 8, 5, 6, 1, 11, 15, 4, 11, 5, 2, 5, 7, 3, 4, 5, 7, 3, 8, 10, 13, 7, 5, 6, 5, 10, 1, 12, 13, 3, 6, 2, 8, 7, 15},
{3, 15, 4, 9, 14, 12, 6, 1, 7, 0, 7, 15, 10, 6, 5, 5, 15, 5, 9, 4, 7, 6, 14, 2, 1, 4, 10, 3, 12, 1, 7, 1, 0, 10, 2, 11, 14, 13, 7, 10, 5, 11, 5, 11, 15, 5, 0, 3, 15, 1, 2, 14, 13, 13, 10, 9, 15, 12, 10, 5, 2, 10, 0, 6},
{4, 6, 5, 13, 11, 10, 15, 4, 2, 15, 13, 6, 7, 7, 4, 0, 4, 6, 7, 4, 9, 1, 6, 7, 6, 1, 4, 2, 0, 11, 6, 3, 14, 5, 9, 2, 2, 10, 1, 2, 13, 14, 4, 11, 4, 7, 12, 9, 8, 2, 2, 9, 5, 7, 9, 12, 8, 15, 0, 9, 12, 11, 1, 12},
{11, 12, 11, 9, 8, 15, 4, 12, 13, 10, 6, 6, 6, 12, 3, 0, 6, 15, 15, 10, 6, 12, 5, 7, 10, 2, 7, 1, 6, 12, 9, 11, 11, 14, 1, 12, 15, 0, 6, 2, 12, 15, 4, 15, 14, 8, 3, 4, 15, 4, 13, 3, 14, 1, 3, 7, 6, 13, 9, 1, 0, 12, 4, 14},
{12, 11, 13, 10, 10, 10, 3, 7, 12, 3, 13, 9, 6, 0, 12, 10, 4, 11, 5, 4, 11, 5, 7, 14, 6, 10, 12, 12, 13, 15, 12, 1, 13, 15, 15, 7, 1, 2, 8, 6, 1, 12, 12, 0, 4, 3, 3, 3, 7, 8, 9, 10, 7, 7, 0, 0, 11, 13, 15, 4, 9, 5, 10, 9},
{6, 12, 3, 0, 9, 11, 6, 4, 9, 9, 1, 5, 9, 14, 3, 7, 15, 3, 5, 0, 5, 11, 7, 6, 13, 5, 10, 2, 12, 10, 2, 6, 0, 1, 1, 13, 9, 3, 11, 7, 8, 2, 10, 9, 13, 6, 6, 4, 12, 0, 3, 10, 9, 4, 15, 11, 14, 1, 9, 3, 0, 14, 6, 1},
{11, 14, 10, 10, 11, 6, 4, 7, 10, 0, 7, 9, 3, 2, 13, 13, 9, 9, 2, 3, 3, 14, 10, 4, 14, 1, 10, 7, 14, 4, 9, 15, 3, 11, 5, 10, 7, 8, 3, 0, 1, 2, 2, 3, 12, 9, 6, 2, 11, 15, 3, 9, 3, 6, 8, 0, 4, 5, 7, 3, 0, 14, 7, 9},
{4, 11, 13, 12, 6, 2, 3, 15, 15, 3, 5, 1, 0, 5, 10, 2, 5, 3, 7, 10, 15, 0, 5, 3, 2, 10, 12, 10, 8, 3, 9, 15, 5, 3, 7, 13, 5, 7, 13, 12, 5, 10, 2, 9, 10, 1, 9, 4, 14, 1, 10, 13, 1, 2, 2, 12, 5, 3, 14, 7, 7, 8, 13, 13},
{10, 12, 11, 10, 0, 15, 4, 3, 0, 8, 3, 0, 15, 0, 3, 10, 10, 9, 15, 3, 13, 3, 8, 3, 8, 2, 14, 7, 1, 6, 13, 8, 2, 2, 12, 3, 3, 0, 10, 12, 0, 1, 1, 7, 5, 0, 13, 10, 7, 13, 9, 9, 13, 7, 0, 1, 0, 2, 14, 2, 13, 0, 8, 3},
{11, 3, 11, 10, 12, 15, 11, 6, 14, 8, 8, 5, 7, 11, 3, 1, 13, 7, 13, 4, 15, 7, 2, 3, 8, 7, 3, 8, 9, 15, 10, 15, 9, 0, 5, 4, 1, 7, 13, 8, 2, 7, 1, 10, 1, 12, 12, 1, 7, 12, 13, 5, 14, 10, 9, 15, 12, 2, 10, 3, 10, 3, 9, 12},
{9, 8, 11, 0, 5, 6, 1, 5, 9, 1, 0, 12, 12, 0, 12, 11, 2, 8, 4, 0, 1, 7, 7, 5, 1, 14, 1, 9, 13, 7, 2, 12, 8, 9, 12, 13, 1, 11, 5, 3, 12, 14, 15, 4, 9, 8, 12, 7, 11, 1, 3, 9, 11, 5, 7, 14, 4, 6, 12, 3, 4, 12, 7, 9},
{10, 12, 2, 14, 14, 1, 11, 8, 3, 7, 13, 7, 2, 1, 14, 13, 7, 6, 15, 8, 15, 12, 13, 10, 11, 15, 4, 2, 6, 13, 12, 3, 2, 10, 15, 14, 10, 11, 8, 14, 9, 3, 12, 9, 15, 2, 14, 14, 5, 13, 7, 6, 2, 1, 1, 4, 1, 0, 13, 10, 1, 0, 2, 9},
{10, 5, 11, 14, 12, 1, 12, 7, 12, 8, 10, 5, 6, 10, 0, 7, 5, 6, 11, 11, 13, 12, 0, 13, 0, 6, 11, 0, 14, 4, 2, 1, 12, 7, 1, 10, 7, 15, 5, 3, 14, 15, 1, 3, 1, 2, 10, 4, 11, 8, 2, 11, 2, 5, 5, 4, 15, 5, 10, 3, 1, 7, 2, 14},
}
mat := generateMatrix(hash)
if *mat != expectedMatrix {
t.Fatal("The generated matrix doesn't match the test vector")
}
}
func TestMatrix_HeavyHash(t *testing.T) {
//expected, err := hex.DecodeString("39387f2e64e7c08d3ce0da8c491b4fcf2c862798dedb4690d819de7926aa4ecb") -> Keccak
expected, err := hex.DecodeString("137c69e3c4707cef02eeb8633d8a91fb5f62801342c9bc7e9514695c98e08b08")
expected, err := hex.DecodeString("87689f379943eaf9b7475ca95325687772bfcc68fc7899caeb4409ec4590c325")
if err != nil {
t.Fatal(err)
}

View File

@@ -12,52 +12,77 @@ import (
"math/big"
)
// CheckProofOfWorkWithTarget check's if the block has a valid PoW according to the provided target
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func CheckProofOfWorkWithTarget(header externalapi.MutableBlockHeader, target *big.Int) bool {
// The block pow must be less than the claimed target
powNum := CalculateProofOfWorkValue(header)
// The block hash must be less or equal than the claimed target.
return powNum.Cmp(target) <= 0
// State is an intermediate data structure with pre-computed values to speed up mining.
type State struct {
mat matrix
Timestamp int64
Nonce uint64
Target big.Int
prePowHash externalapi.DomainHash
}
// CheckProofOfWorkByBits check's if the block has a valid PoW according to its Bits field
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool {
return CheckProofOfWorkWithTarget(header, difficulty.CompactToBig(header.Bits()))
}
// CalculateProofOfWorkValue hashes the given header and returns its big.Int value
func CalculateProofOfWorkValue(header externalapi.MutableBlockHeader) *big.Int {
// NewState creates a new state with pre-computed values to speed up mining
// It takes the target from the Bits field
func NewState(header externalapi.MutableBlockHeader) *State {
target := difficulty.CompactToBig(header.Bits())
// Zero out the time and nonce.
timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
header.SetTimeInMilliseconds(0)
header.SetNonce(0)
prePowHash := consensushashing.HeaderHash(header)
header.SetTimeInMilliseconds(timestamp)
header.SetNonce(nonce)
return &State{
Target: *target,
prePowHash: *prePowHash,
mat: *generateMatrix(prePowHash),
Timestamp: timestamp,
Nonce: nonce,
}
}
// CalculateProofOfWorkValue hashes the internal header and returns its big.Int value
func (state *State) CalculateProofOfWorkValue() *big.Int {
// PRE_POW_HASH || TIME || 32 zero byte padding || NONCE
writer := hashes.NewPoWHashWriter()
writer.InfallibleWrite(prePowHash.ByteSlice())
err := serialization.WriteElement(writer, timestamp)
writer.InfallibleWrite(state.prePowHash.ByteSlice())
err := serialization.WriteElement(writer, state.Timestamp)
if err != nil {
panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
}
zeroes := [32]byte{}
writer.InfallibleWrite(zeroes[:])
err = serialization.WriteElement(writer, nonce)
err = serialization.WriteElement(writer, state.Nonce)
if err != nil {
panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
}
powHash := writer.Finalize()
matrix := generateMatrix(powHash)
heavyHash := matrix.HeavyHash(powHash)
heavyHash := state.mat.HeavyHash(powHash)
return toBig(heavyHash)
}
// IncrementNonce increments the nonce in State by 1
func (state *State) IncrementNonce() {
state.Nonce++
}
// CheckProofOfWork checks if the block has a valid PoW according to the provided target
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func (state *State) CheckProofOfWork() bool {
// The block pow must not exceed the claimed target
powNum := state.CalculateProofOfWorkValue()
// The block hash must be less than or equal to the claimed target.
return powNum.Cmp(&state.Target) <= 0
}
// CheckProofOfWorkByBits checks if the block has a valid PoW according to its Bits field
// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network
func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool {
return NewState(header).CheckProofOfWork()
}
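The point of State is to hoist the nonce-independent work (the pre-pow hash and the HeavyHash matrix) out of the mining loop, leaving only the time/nonce serialization and the hashes per attempt. A sketch of the intended usage, mirroring the SolveBlock change shown earlier:
package main

import (
	"math/rand"

	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
)

// mine searches for a nonce that satisfies the target encoded in the header's Bits field.
func mine(block *externalapi.DomainBlock, r *rand.Rand) {
	header := block.Header.ToMutable()
	state := pow.NewState(header) // matrix and prePowHash are computed once, here
	for state.Nonce = r.Uint64(); ; state.IncrementNonce() {
		if state.CheckProofOfWork() { // per attempt: serialize time/nonce, hash, compare
			header.SetNonce(state.Nonce)
			block.Header = header.ToImmutable()
			return
		}
	}
}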
// toBig converts an externalapi.DomainHash into a big.Int treated as a little-endian string.
func toBig(hash *externalapi.DomainHash) *big.Int {
// We treat the Hash as little-endian for PoW purposes, but the big package wants the bytes in big-endian, so reverse them.
@@ -72,10 +97,17 @@ func toBig(hash *externalapi.DomainHash) *big.Int {
// BlockLevel returns the block level of the given header.
func BlockLevel(header externalapi.BlockHeader) int {
proofOfWorkValue := CalculateProofOfWorkValue(header.ToMutable())
for blockLevel := 0; ; blockLevel++ {
if blockLevel == constants.MaxBlockLevel || proofOfWorkValue.Bit(blockLevel+1) != 0 {
return blockLevel
}
// Genesis is defined to be the root of all blocks at all levels, so we define its level to be the
// maximal block level.
if len(header.DirectParents()) == 0 {
return constants.MaxBlockLevel
}
proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue()
level := constants.MaxBlockLevel - proofOfWorkValue.BitLen()
// If the block has a level lower than genesis, make it zero.
if level < 0 {
level = 0
}
return level
}
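The new rule is pure bit-length arithmetic: level = MaxBlockLevel - BitLen(powValue), floored at zero, so a numerically smaller (harder) pow value has a shorter bit length and therefore a higher level. A self-contained worked example, assuming MaxBlockLevel = 225 as defined above:
package main

import (
	"fmt"
	"math/big"
)

const maxBlockLevel = 225 // mirrors constants.MaxBlockLevel above

func levelFor(powValue *big.Int) int {
	level := maxBlockLevel - powValue.BitLen()
	if level < 0 { // blocks "below" genesis clamp to level 0
		return 0
	}
	return level
}

func main() {
	powValue := new(big.Int).Lsh(big.NewInt(1), 200) // 2^200 has BitLen 201
	fmt.Println(levelFor(powValue))                  // 225 - 201 = 24
}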

View File

@@ -22,7 +22,7 @@ import (
//
const (
defaultMaxCoinbasePayloadLength = 150
defaultMaxCoinbasePayloadLength = 204
// defaultMaxBlockMass is a bound on the mass of a block; larger values increase the bound D
// on the round trip time of a block, which affects the other parameters as described below
defaultMaxBlockMass = 500_000
@@ -49,7 +49,7 @@ const (
defaultMergeSetSizeLimit = defaultGHOSTDAGK * 10
defaultSubsidyGenesisReward = 1 * constants.SompiPerKaspa
defaultMinSubsidy = 1 * constants.SompiPerKaspa
defaultMaxSubsidy = 1000 * constants.SompiPerKaspa
defaultMaxSubsidy = 500 * constants.SompiPerKaspa
defaultBaseSubsidy = 50 * constants.SompiPerKaspa
defaultFixedSubsidySwitchPruningPointInterval uint64 = 7
defaultCoinbasePayloadScriptPublicKeyMaxLength = 150

View File

@@ -19,8 +19,31 @@ var genesisTxPayload = []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score
0x00, 0xE1, 0xF5, 0x05, 0x00, 0x00, 0x00, 0x00, // Subsidy
0x00, 0x00, //script version
0x01, // Varint
0x00, // OP-FALSE
0x01, // Varint
0x00, // OP-FALSE
0xd7, 0x95, 0xd7, 0x9e, 0xd7, 0x94, 0x20, 0xd7, // ומה די עליך ועל אחיך ייטב בשאר כספא ודהבה למעבד כרעות אלהכם תעבדון (Ezra 7:18: "And whatever seems good to you and to your brothers to do with the rest of the silver and gold, do it according to the will of your God")
0x93, 0xd7, 0x99, 0x20, 0xd7, 0xa2, 0xd7, 0x9c,
0xd7, 0x99, 0xd7, 0x9a, 0x20, 0xd7, 0x95, 0xd7,
0xa2, 0xd7, 0x9c, 0x20, 0xd7, 0x90, 0xd7, 0x97,
0xd7, 0x99, 0xd7, 0x9a, 0x20, 0xd7, 0x99, 0xd7,
0x99, 0xd7, 0x98, 0xd7, 0x91, 0x20, 0xd7, 0x91,
0xd7, 0xa9, 0xd7, 0x90, 0xd7, 0xa8, 0x20, 0xd7,
0x9b, 0xd7, 0xa1, 0xd7, 0xa4, 0xd7, 0x90, 0x20,
0xd7, 0x95, 0xd7, 0x93, 0xd7, 0x94, 0xd7, 0x91,
0xd7, 0x94, 0x20, 0xd7, 0x9c, 0xd7, 0x9e, 0xd7,
0xa2, 0xd7, 0x91, 0xd7, 0x93, 0x20, 0xd7, 0x9b,
0xd7, 0xa8, 0xd7, 0xa2, 0xd7, 0x95, 0xd7, 0xaa,
0x20, 0xd7, 0x90, 0xd7, 0x9c, 0xd7, 0x94, 0xd7,
0x9b, 0xd7, 0x9d, 0x20, 0xd7, 0xaa, 0xd7, 0xa2,
0xd7, 0x91, 0xd7, 0x93, 0xd7, 0x95, 0xd7, 0x9f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Bitcoin block hash 0000000000000000000b1f8e1c17b0133d439174e52efbb0c41c3583a8aa66b0
0x00, 0x0b, 0x1f, 0x8e, 0x1c, 0x17, 0xb0, 0x13,
0x3d, 0x43, 0x91, 0x74, 0xe5, 0x2e, 0xfb, 0xb0,
0xc4, 0x1c, 0x35, 0x83, 0xa8, 0xaa, 0x66, 0xb0,
0x0f, 0xca, 0x37, 0xca, 0x66, 0x7c, 0x2d, 0x55, // Checkpoint block hash 0fca37ca667c2d550a6c4416dad9717e50927128c424fa4edbebc436ab13aeef
0x0a, 0x6c, 0x44, 0x16, 0xda, 0xd9, 0x71, 0x7e,
0x50, 0x92, 0x71, 0x28, 0xc4, 0x24, 0xfa, 0x4e,
0xdb, 0xeb, 0xc4, 0x36, 0xab, 0x13, 0xae, 0xef,
}
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
@@ -31,19 +54,13 @@ var genesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, []*externa
// genesisHash is the hash of the first block in the block DAG for the main
// network (genesis block).
var genesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x75, 0xd8, 0x51, 0xcc, 0x91, 0xba, 0x55, 0x32,
0xfd, 0xf9, 0x3e, 0xf2, 0xa5, 0x28, 0x92, 0x83,
0x35, 0xe0, 0x61, 0xa2, 0xe4, 0x77, 0x76, 0x5e,
0xd7, 0x10, 0x5d, 0x8e, 0x78, 0xc7, 0xb8, 0x0d,
0x58, 0xc2, 0xd4, 0x19, 0x9e, 0x21, 0xf9, 0x10, 0xd1, 0x57, 0x1d, 0x11, 0x49, 0x69, 0xce, 0xce, 0xf4, 0x8f, 0x9, 0xf9, 0x34, 0xd4, 0x2c, 0xcb, 0x6a, 0x28, 0x1a, 0x15, 0x86, 0x8f, 0x29, 0x99,
})
// genesisMerkleRoot is the hash of the first transaction in the genesis block
// for the main network.
var genesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0xea, 0xb2, 0xe3, 0xfa, 0xd6, 0x72, 0x95, 0x38,
0x04, 0xe0, 0x43, 0x1a, 0xf2, 0x2d, 0xc5, 0xeb,
0x27, 0xd8, 0x0a, 0xdd, 0x4c, 0xc5, 0xd9, 0x80,
0x90, 0xc7, 0x6c, 0x43, 0xce, 0x81, 0x23, 0x68,
0x8e, 0xc8, 0x98, 0x56, 0x8c, 0x68, 0x1, 0xd1, 0x3d, 0xf4, 0xee, 0x6e, 0x2a, 0x1b, 0x54, 0xb7, 0xe6, 0x23, 0x6f, 0x67, 0x1f, 0x20, 0x95, 0x4f, 0x5, 0x30, 0x64, 0x10, 0x51, 0x8e, 0xeb, 0x32,
})
// genesisBlock defines the genesis block of the block DAG which serves as the
@@ -54,11 +71,13 @@ var genesisBlock = externalapi.DomainBlock{
[]externalapi.BlockLevelParents{},
genesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17c2df949fe,
0x207fffff,
0x0,
0,
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x71, 0x0f, 0x27, 0xdf, 0x42, 0x3e, 0x63, 0xaa, 0x6c, 0xdb, 0x72, 0xb8, 0x9e, 0xa5, 0xa0, 0x6c, 0xff, 0xa3, 0x99, 0xd6, 0x6f, 0x16, 0x77, 0x04, 0x45, 0x5b, 0x5a, 0xf5, 0x9d, 0xef, 0x8e, 0x20,
}),
1637609671037,
486722099,
0x3392c,
1312860, // Checkpoint DAA score
0,
big.NewInt(0),
&externalapi.DomainHash{},
@@ -86,10 +105,10 @@ var devnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0,
// devGenesisHash is the hash of the first block in the block DAG for the development
// network (genesis block).
var devnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0xf1, 0xa4, 0x65, 0xfe, 0xd2, 0x08, 0x54, 0xc1,
0x72, 0x60, 0xdc, 0x8b, 0x29, 0x67, 0x90, 0x1f,
0xa9, 0x48, 0x47, 0x4e, 0x63, 0xa1, 0xfc, 0x09,
0x6d, 0x10, 0x7b, 0x08, 0x23, 0x37, 0xfd, 0x59,
0x34, 0x3b, 0x53, 0xb7, 0xad, 0x82, 0x8a, 0x33,
0x4b, 0x03, 0xb1, 0xbb, 0x13, 0x15, 0xb0, 0x75,
0xf3, 0xf0, 0x40, 0xfb, 0x64, 0x0c, 0xca, 0x19,
0xc9, 0x61, 0xe7, 0x69, 0xdb, 0x98, 0x98, 0x3e,
})
// devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -112,7 +131,7 @@ var devnetGenesisBlock = externalapi.DomainBlock{
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x11e9db49828,
0x1e7fffff,
0x268db,
0x48e5e,
0,
0,
big.NewInt(0),
@@ -140,10 +159,10 @@ var simnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0,
// simnetGenesisHash is the hash of the first block in the block DAG for
// the simnet (genesis block).
var simnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x9b, 0x66, 0xc8, 0x1a, 0xd5, 0xb8, 0xc2, 0xf7,
0x45, 0x45, 0x47, 0xd6, 0x83, 0x83, 0xf8, 0x27,
0x45, 0x46, 0xcf, 0x98, 0x1c, 0x74, 0x46, 0xcf,
0x25, 0x5e, 0xd3, 0xe7, 0xe4, 0x68, 0x47, 0x6f,
0x41, 0x1f, 0x8c, 0xd2, 0x6f, 0x3d, 0x41, 0xae,
0xa3, 0x9e, 0x78, 0x57, 0x39, 0x27, 0xda, 0x24,
0xd2, 0x39, 0x95, 0x70, 0x5b, 0x57, 0x9f, 0x30,
0x95, 0x9b, 0x91, 0x27, 0xe9, 0x6b, 0x79, 0xe3,
})
// simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -164,9 +183,9 @@ var simnetGenesisBlock = externalapi.DomainBlock{
simnetGenesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17c2df94abb,
0x17c5f62fbb6,
0x207fffff,
0x0,
0x2,
0,
0,
big.NewInt(0),
@@ -194,10 +213,10 @@ var testnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0,
// testnetGenesisHash is the hash of the first block in the block DAG for the test
// network (genesis block).
var testnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x47, 0x4f, 0xfd, 0xd1, 0xf8, 0x3d, 0x1d, 0x00,
0xcb, 0x07, 0x17, 0x91, 0x56, 0x60, 0xd4, 0xa7,
0x27, 0x5e, 0x43, 0xc2, 0x74, 0xe6, 0x76, 0x6e,
0x34, 0xe4, 0x13, 0xbb, 0x24, 0x02, 0x30, 0x66,
0xf8, 0x96, 0xa3, 0x03, 0x48, 0x73, 0xbe, 0x17,
0x39, 0xfc, 0x43, 0x59, 0x23, 0x68, 0x99, 0xfd,
0x3d, 0x65, 0xd2, 0xbc, 0x94, 0xf9, 0x78, 0x0d,
0xf0, 0xd0, 0xda, 0x3e, 0xb1, 0xcc, 0x43, 0x70,
})
// testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block
@@ -218,9 +237,9 @@ var testnetGenesisBlock = externalapi.DomainBlock{
testnetGenesisMerkleRoot,
&externalapi.DomainHash{},
externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()),
0x17c2df94abb,
0x17c5f62fbb6,
0x1e7fffff,
0x35f16,
0x14582,
0,
0,
big.NewInt(0),

View File

@@ -185,7 +185,7 @@ type Params struct {
FixedSubsidySwitchPruningPointInterval uint64
FixedSubsidySwitchHashRateDifference *big.Int
FixedSubsidySwitchHashRateThreshold *big.Int
}
// NormalizeRPCServerAddress returns addr with the current network default
@@ -211,6 +211,15 @@ var MainnetParams = Params{
Net: appmessage.Mainnet,
RPCPort: "16110",
DefaultPort: "16111",
DNSSeeds: []string{
"mainnet-dnsseed.daglabs-dev.com",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-1.kaspanet.org",
// This DNS seeder is run by Denis Mashkevich
"mainnet-dnsseed-2.kaspanet.org",
// This DNS seeder is run by Elichai Turkel
"kaspa.turkel.in",
},
// DAG parameters
GenesisBlock: &genesisBlock,
@@ -262,7 +271,7 @@ var MainnetParams = Params{
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
PruningProofM: defaultPruningProofM,
FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
FixedSubsidySwitchHashRateDifference: big.NewInt(1_000_000),
FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000),
}
// TestnetParams defines the network parameters for the test Kaspa network.
@@ -324,7 +333,7 @@ var TestnetParams = Params{
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
PruningProofM: defaultPruningProofM,
FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
FixedSubsidySwitchHashRateDifference: big.NewInt(1_000_000),
FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000),
}
// SimnetParams defines the network parameters for the simulation test Kaspa
@@ -390,7 +399,7 @@ var SimnetParams = Params{
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
PruningProofM: defaultPruningProofM,
FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
FixedSubsidySwitchHashRateDifference: big.NewInt(1_000_000),
FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000),
}
// DevnetParams defines the network parameters for the development Kaspa network.
@@ -452,7 +461,7 @@ var DevnetParams = Params{
CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength,
PruningProofM: defaultPruningProofM,
FixedSubsidySwitchPruningPointInterval: defaultFixedSubsidySwitchPruningPointInterval,
FixedSubsidySwitchHashRateDifference: big.NewInt(1_000_000),
FixedSubsidySwitchHashRateThreshold: big.NewInt(150_000_000_000),
}
var (

View File

@@ -40,7 +40,7 @@ func TestValidateAndInsertTransaction(t *testing.T) {
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionsToInsert := make([]*externalapi.DomainTransaction, 10)
for i := range transactionsToInsert {
transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i)
transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i, 0)
_, err = miningManager.ValidateAndInsertTransaction(transactionsToInsert[i], false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
@@ -89,7 +89,7 @@ func TestImmatureSpend(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
tx := createTransactionWithUTXOEntry(t, 0)
tx := createTransactionWithUTXOEntry(t, 0, consensusConfig.GenesisBlock.Header.DAAScore())
_, err = miningManager.ValidateAndInsertTransaction(tx, false, false)
txRuleError := &mempool.TxRuleError{}
if !errors.As(err, txRuleError) || txRuleError.RejectCode != mempool.RejectImmatureSpend {
@@ -119,7 +119,7 @@ func TestInsertDoubleTransactionsToMempool(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transaction := createTransactionWithUTXOEntry(t, 0)
transaction := createTransactionWithUTXOEntry(t, 0, 0)
_, err = miningManager.ValidateAndInsertTransaction(transaction, false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
@@ -186,7 +186,7 @@ func TestHandleNewBlockTransactions(t *testing.T) {
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionsToInsert := make([]*externalapi.DomainTransaction, 10)
for i := range transactionsToInsert {
transaction := createTransactionWithUTXOEntry(t, i)
transaction := createTransactionWithUTXOEntry(t, i, 0)
transactionsToInsert[i] = transaction
_, err = miningManager.ValidateAndInsertTransaction(transaction, false, true)
if err != nil {
@@ -253,12 +253,12 @@ func TestDoubleSpendWithBlock(t *testing.T) {
tcAsConsensusPointer := &tcAsConsensus
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer)
miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params))
transactionInTheMempool := createTransactionWithUTXOEntry(t, 0)
transactionInTheMempool := createTransactionWithUTXOEntry(t, 0, 0)
_, err = miningManager.ValidateAndInsertTransaction(transactionInTheMempool, false, true)
if err != nil {
t.Fatalf("ValidateAndInsertTransaction: %v", err)
}
doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0)
doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0, 0)
doubleSpendTransactionInTheBlock.Inputs[0].PreviousOutpoint = transactionInTheMempool.Inputs[0].PreviousOutpoint
blockTransactions := []*externalapi.DomainTransaction{nil, doubleSpendTransactionInTheBlock}
_, err = miningManager.HandleNewBlockTransactions(blockTransactions)
@@ -571,7 +571,7 @@ func TestRevalidateHighPriorityTransactions(t *testing.T) {
})
}
func createTransactionWithUTXOEntry(t *testing.T, i int) *externalapi.DomainTransaction {
func createTransactionWithUTXOEntry(t *testing.T, i int, daaScore uint64) *externalapi.DomainTransaction {
prevOutTxID := externalapi.DomainTransactionID{}
prevOutPoint := externalapi.DomainOutpoint{TransactionID: prevOutTxID, Index: uint32(i)}
scriptPublicKey, redeemScript := testutils.OpTrueScript()
@@ -587,7 +587,7 @@ func createTransactionWithUTXOEntry(t *testing.T, i int) *externalapi.DomainTran
100000000, // 1 KAS
scriptPublicKey,
true,
uint64(0)),
daaScore),
}
txOut := externalapi.DomainTransactionOutput{
Value: 10000,

View File

@@ -29,7 +29,6 @@ import (
const (
defaultConfigFilename = "kaspad.conf"
defaultDataDirname = "data"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "kaspad.log"
@@ -86,7 +85,7 @@ type Flags struct {
Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 16111, testnet: 16211)"`
TargetOutboundPeers int `long:"outpeers" description:"Target number of outbound peers"`
MaxInboundPeers int `long:"maxinpeers" description:"Max number of inbound peers"`
DisableBanning bool `long:"nobanning" description:"Disable banning of misbehaving peers"`
EnableBanning bool `long:"enablebanning" description:"Enable banning of misbehaving peers"`
BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`
Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 192.168.1.0/24 or ::1)"`
@@ -121,6 +120,7 @@ type Flags struct {
MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"`
UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"`
IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)"`
AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"`
EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"`
NetworkFlags
ServiceOptions *ServiceOptions

View File

@@ -48,6 +48,7 @@ type overrideDAGParamsConfig struct {
EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"`
DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"`
SkipProofOfWork *bool `json:"skipProofOfWork"`
HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"`
}
// ResolveNetwork parses the network command line argument and sets NetParams accordingly.
@@ -81,10 +82,6 @@ func (networkFlags *NetworkFlags) ResolveNetwork(parser *flags.Parser) error {
return err
}
if numNets == 0 {
return errors.Errorf("Mainnet has not launched yet, use --testnet to run in testnet mode")
}
err := networkFlags.overrideDAGParams()
if err != nil {
return err

View File

@@ -66,7 +66,7 @@ func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) {
log.Debugf("Connecting to connection request %s", connReq.address)
err := c.initiateConnection(connReq.address)
if err != nil {
log.Infof("Couldn't connect to %s: %s", address, err)
log.Infof("Couldn't connect to requested connection %s: %s", address, err)
// if connection request is one try - remove from pending and ignore failure
if !connReq.isPermanent {
delete(c.pendingRequested, address)

View File

@@ -41,7 +41,7 @@ func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) {
err := c.initiateConnection(addressString)
if err != nil {
log.Infof("Couldn't connect to %s: %s", addressString, err)
log.Debugf("Couldn't connect to %s: %s", addressString, err)
c.addressManager.MarkConnectionFailure(netAddress)
continue
}

View File

@@ -18,7 +18,7 @@ type p2pServer struct {
gRPCServer
}
const p2pMaxMessageSize = 100 * 1024 * 1024 // 100MB
const p2pMaxMessageSize = 1024 * 1024 * 1024 // 1GB
// p2pMaxInboundConnections is the max number of inbound connections for the P2P server.
// Note that inbound connections are not limited by the gRPC server. (A value of 0 means
@@ -45,7 +45,7 @@ func (p *p2pServer) MessageStream(stream protowire.P2P_MessageStreamServer) erro
func (p *p2pServer) Connect(address string) (server.Connection, error) {
log.Debugf("%s Dialing to %s", p.name, address)
const dialTimeout = 30 * time.Second
const dialTimeout = 1 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
defer cancel()

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build !windows && !plan9
// +build !windows,!plan9
package limits

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package signal

View File

@@ -7,9 +7,7 @@ import (
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
"github.com/kaspanet/kaspad/stability-tests/common"
"github.com/kaspanet/kaspad/util/difficulty"
"math"
"math/big"
"math/rand"
"os"
"testing"
@@ -48,7 +46,7 @@ func TestDAA(t *testing.T) {
},
{
name: "sudden hash rate drop",
runDuration: 15 * time.Minute,
runDuration: 45 * time.Minute,
targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 {
if totalElapsedDuration < 5*time.Minute {
return machineHashNanoseconds * 2
@@ -130,7 +128,7 @@ func TestDAA(t *testing.T) {
},
{
name: "constant exponential hash rate decrease",
runDuration: 15 * time.Minute,
runDuration: 45 * time.Minute,
targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 {
fromHashNanoseconds := machineHashNanoseconds * 2
toHashNanoseconds := machineHashNanoseconds * 10
@@ -162,17 +160,15 @@ func measureMachineHashNanoseconds(t *testing.T) int64 {
defer t.Logf("Finished measuring machine hash rate")
genesisBlock := dagconfig.DevnetParams.GenesisBlock
targetDifficulty := difficulty.CompactToBig(genesisBlock.Header.Bits())
headerForMining := genesisBlock.Header.ToMutable()
state := pow.NewState(genesisBlock.Header.ToMutable())
machineHashesPerSecondMeasurementDuration := 10 * time.Second
hashes := int64(0)
nonce := rand.Uint64()
state.Nonce = rand.Uint64()
loopForDuration(machineHashesPerSecondMeasurementDuration, func(isFinished *bool) {
headerForMining.SetNonce(nonce)
pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty)
state.CheckProofOfWork()
hashes++
nonce++
state.IncrementNonce()
})
return machineHashesPerSecondMeasurementDuration.Nanoseconds() / hashes
@@ -202,17 +198,18 @@ func runDAATest(t *testing.T, testName string, runDuration time.Duration,
startTime := time.Now()
loopForDuration(runDuration, func(isFinished *bool) {
templateBlock := fetchBlockForMining(t, rpcClient)
targetDifficulty := difficulty.CompactToBig(templateBlock.Header.Bits())
headerForMining := templateBlock.Header.ToMutable()
minerState := pow.NewState(headerForMining)
// Try hashes until we find a valid block
miningStartTime := time.Now()
nonce := rand.Uint64()
minerState.Nonce = rand.Uint64()
for {
hashStartTime := time.Now()
blockFound := tryNonceForMiningAndIncrementNonce(headerForMining, &nonce, targetDifficulty, templateBlock)
if blockFound {
if minerState.CheckProofOfWork() {
headerForMining.SetNonce(minerState.Nonce)
templateBlock.Header = headerForMining.ToImmutable()
break
}
@@ -227,6 +224,7 @@ func runDAATest(t *testing.T, testName string, runDuration time.Duration,
if *isFinished {
return
}
minerState.IncrementNonce()
}
// Collect stats about block rate
@@ -265,19 +263,6 @@ func fetchBlockForMining(t *testing.T, rpcClient *rpcclient.RPCClient) *external
return templateBlock
}
func tryNonceForMiningAndIncrementNonce(headerForMining externalapi.MutableBlockHeader, nonce *uint64,
targetDifficulty *big.Int, templateBlock *externalapi.DomainBlock) bool {
headerForMining.SetNonce(*nonce)
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
templateBlock.Header = headerForMining.ToImmutable()
return true
}
*nonce++
return false
}
func waitUntilTargetHashDurationHadElapsed(startTime time.Time, hashStartTime time.Time,
targetHashNanosecondsFunction func(totalElapsedDuration time.Duration) int64) {
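The DAA test now drives mining through the pow.State API instead of resetting the nonce on a mutable header and re-checking against a precomputed target on every attempt. Assembled from the calls in the hunks, a minimal solve loop looks like this (a sketch; the real tests interleave timing and cancellation logic between attempts):

state := pow.NewState(block.Header.ToMutable())
state.Nonce = rand.Uint64()
for !state.CheckProofOfWork() {
	state.IncrementNonce()
}
header := block.Header.ToMutable()
header.SetNonce(state.Nonce)
block.Header = header.ToImmutable()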

View File

@@ -4,11 +4,9 @@ import (
"fmt"
"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/difficulty"
"math"
"math/rand"
"os"
"os/exec"
"strings"
@@ -78,8 +76,8 @@ func realMain() error {
genesisTimestamp := activeConfig().NetParams().GenesisBlock.Header.TimeInMilliseconds()
mutableHeader.SetTimeInMilliseconds(genesisTimestamp + 1000)
block.Header = mutableHeader.ToImmutable()
solvedBlock := solveBlock(block)
_, err = rpcClient.SubmitBlock(solvedBlock)
mining.SolveBlock(block, rand.New(rand.NewSource(time.Now().UnixNano())))
_, err = rpcClient.SubmitBlock(block)
if err != nil {
return err
}
@@ -117,6 +115,7 @@ func startNode() (teardown func(), err error) {
"--logdir", dataDir,
"--rpclisten", rpcAddress,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err
@@ -183,8 +182,8 @@ func mineBlock(rpcClient *rpc.Client, miningAddress util.Address) error {
if err != nil {
return err
}
solvedBlock := solveBlock(block)
_, err = rpcClient.SubmitBlock(solvedBlock)
mining.SolveBlock(block, rand.New(rand.NewSource(time.Now().UnixNano())))
_, err = rpcClient.SubmitBlock(block)
if err != nil {
return err
}
@@ -200,9 +199,10 @@ func mineTips(numOfTips int, miningAddress util.Address, rpcClient *rpc.Client)
if err != nil {
return err
}
rd := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := 0; i < numOfTips; i++ {
solvedBlock := solveBlock(block)
_, err = rpcClient.SubmitBlock(solvedBlock)
mining.SolveBlock(block, rd)
_, err = rpcClient.SubmitBlock(block)
if err != nil {
return err
}
@@ -296,23 +296,6 @@ func getCurrentTipsLength(rpcClient *rpc.Client) (int, error) {
return len(dagInfo.TipHashes), nil
}
var latestNonce uint64 = 0 // Use to make the nonce unique.
// The nonce is unique for each block in this function.
func solveBlock(block *externalapi.DomainBlock) *externalapi.DomainBlock {
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
maxUInt64 := uint64(math.MaxUint64)
for i := latestNonce; i < maxUInt64; i++ {
headerForMining.SetNonce(i)
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
latestNonce = i + 1
return block
}
}
panic("Failed to solve block!")
}
func killWithSigterm(cmd *exec.Cmd, commandName string) {
err := cmd.Process.Signal(syscall.SIGTERM)
if err != nil {
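The bespoke solveBlock helper (deleted above) gives way to the shared mining.SolveBlock utility, which solves the block in place instead of returning it and takes an explicit *rand.Rand so the caller controls seeding. Usage, as in the hunks:

rd := rand.New(rand.NewSource(time.Now().UnixNano()))
mining.SolveBlock(block, rd) // mutates block.Header in place
if _, err := rpcClient.SubmitBlock(block); err != nil {
	return err
}

Note how mineTips creates one rd outside its loop: reusing a single seeded source across blocks avoids reseeding from the clock on every iteration.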

View File

@@ -1 +1 @@
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000}
{"skipProofOfWork":true, "mergeSetSizeLimit": 30, "finalityDuration": 30000, "hardForkOmitGenesisFromParentsDaaScore": 2505}

View File

@@ -38,6 +38,7 @@ func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir st
"--listen", listen,
"--profile", profilePort,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
}
if connect != "" {
args = append(args, "--connect", connect)

View File

@@ -44,6 +44,7 @@ func startNodes() (teardown func(), err error) {
"--rpclisten", syncerRPCAddress,
"--listen", syncerListen,
"--loglevel", "debug",
"--allow-submit-block-when-not-synced",
)
if err != nil {
return nil, err

View File

@@ -36,6 +36,7 @@ func setConfig(t *testing.T, harness *appHarness) {
harness.config.Listeners = []string{harness.p2pAddress}
harness.config.RPCListeners = []string{harness.rpcAddress}
harness.config.UTXOIndex = harness.utxoIndex
harness.config.AllowSubmitBlockWhenNotSynced = true
if harness.overrideDAGParams != nil {
harness.config.ActiveNetParams = harness.overrideDAGParams
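The same switch is threaded through all of the test harnesses: twice as the --allow-submit-block-when-not-synced CLI flag above, and here as the AllowSubmitBlockWhenNotSynced config field of the integration harness, so test miners can keep submitting blocks while a node still reports itself as unsynced.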

View File

@@ -3,6 +3,8 @@ package integration
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
"math/rand"
"reflect"
"sync"
"testing"
@@ -117,7 +119,7 @@ func TestIBDWithPruning(t *testing.T) {
}
// This should trigger resolving the syncee virtual
syncerTip := mineNextBlockWithMockTimestamps(t, syncer)
syncerTip := mineNextBlockWithMockTimestamps(t, syncer, rand.New(rand.NewSource(time.Now().UnixNano())))
time.Sleep(time.Second)
synceeSelectedTip, err := syncee.rpcClient.GetSelectedTipHash()
if err != nil {
@@ -184,12 +186,13 @@ func TestIBDWithPruning(t *testing.T) {
// iteration to find the highest shared chain
// block.
const synceeOnlyBlocks = 2
rd := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := 0; i < synceeOnlyBlocks; i++ {
mineNextBlockWithMockTimestamps(t, syncee1)
mineNextBlockWithMockTimestamps(t, syncee1, rd)
}
for i := 0; i < numBlocks-1; i++ {
mineNextBlockWithMockTimestamps(t, syncer)
mineNextBlockWithMockTimestamps(t, syncer, rd)
}
testSync(syncer, syncee1)
@@ -203,7 +206,7 @@ var currentMockTimestamp int64 = 0
// mineNextBlockWithMockTimestamps mines blocks with large timestamp differences
// between every two blocks. This is done to avoid the timestamp threshold validation
// of ibd-with-headers-proof
func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness, rd *rand.Rand) *externalapi.DomainBlock {
blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress)
if err != nil {
t.Fatalf("Error getting block template: %+v", err)
@@ -223,7 +226,7 @@ func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness) *externa
mutableHeader.SetTimeInMilliseconds(currentMockTimestamp)
block.Header = mutableHeader.ToImmutable()
solveBlock(block)
mining.SolveBlock(block, rd)
_, err = harness.rpcClient.SubmitBlock(block)
if err != nil {
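mineNextBlockWithMockTimestamps now takes a caller-owned *rand.Rand, and TestIBDWithPruning shares one seeded instance (rd) across both mining loops. The mock-timestamp trick itself is sketched below from the visible pieces; the step size is an assumption, the real test defines its own spacing:

// Each mined block gets a timestamp far ahead of the previous one,
// sidestepping the timestamp-threshold validation of ibd-with-headers-proof.
var currentMockTimestamp int64

func nextMockTimestamp() int64 {
	const stepMilliseconds = 10 * 60 * 1000 // 10 minutes (assumed spacing)
	currentMockTimestamp += stepMilliseconds
	return currentMockTimestamp
}

// Usage, per the hunk: mutableHeader.SetTimeInMilliseconds(nextMockTimestamp())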

View File

@@ -3,28 +3,13 @@ package integration
import (
"math/rand"
"testing"
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/pow"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/kaspanet/kaspad/domain/consensus/utils/mining"
)
func solveBlock(block *externalapi.DomainBlock) *externalapi.DomainBlock {
targetDifficulty := difficulty.CompactToBig(block.Header.Bits())
headerForMining := block.Header.ToMutable()
initialNonce := rand.Uint64()
for i := initialNonce; i != initialNonce-1; i++ {
headerForMining.SetNonce(i)
if pow.CheckProofOfWorkWithTarget(headerForMining, targetDifficulty) {
block.Header = headerForMining.ToImmutable()
return block
}
}
panic("Failed to solve block! This should never happen")
}
func mineNextBlock(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress)
if err != nil {
@@ -36,7 +21,8 @@ func mineNextBlock(t *testing.T, harness *appHarness) *externalapi.DomainBlock {
t.Fatalf("Error converting block: %s", err)
}
solveBlock(block)
rd := rand.New(rand.NewSource(time.Now().UnixNano()))
mining.SolveBlock(block, rd)
_, err = harness.rpcClient.SubmitBlock(block)
if err != nil {
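Worth noting in the deleted helper: the loop "for i := initialNonce; i != initialNonce-1; i++" relies on uint64 wraparound to walk the entire 2^64 nonce space from a random starting point before reaching the panic. That loop now lives behind mining.SolveBlock, and mineNextBlock simply seeds a fresh rand.Rand per call to stay self-contained.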

View File

@@ -14,7 +14,7 @@ import (
func TestGetHashrateString(t *testing.T) {
var results = map[string]string{
dagconfig.MainnetParams.Name: "2 H/s",
dagconfig.MainnetParams.Name: "1.53 GH/s",
dagconfig.TestnetParams.Name: "131.07 KH/s",
dagconfig.DevnetParams.Name: "131.07 KH/s",
dagconfig.SimnetParams.Name: "2.00 KH/s",
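The expected mainnet string jumps from "2 H/s" to "1.53 GH/s" because the hard fork ships a far harder mainnet genesis target. As a rough cross-check, the usual difficulty-to-hashrate estimate is maxHash divided by target+1, assuming kaspa's one-block-per-second target rate (kaspad's own helper may round and format differently):

// Hashes per second implied by a compact difficulty target,
// under the assumed one-block-per-second target block rate.
func estimateHashesPerSecond(bits uint32) *big.Int {
	target := difficulty.CompactToBig(bits)
	maxHash := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	return new(big.Int).Div(maxHash, new(big.Int).Add(target, big.NewInt(1)))
}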

View File

@@ -11,7 +11,7 @@ const validCharacters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs
const (
appMajor uint = 0
appMinor uint = 11
appPatch uint = 0
appPatch uint = 6
)
// appBuild is defined as a variable so it can be overridden during the build