Mirror of https://github.com/kaspanet/kaspad.git
[NOD-629] change GHOSTDAG k to uint8 (#594)
* [NOD-629] Change GHOSTDAG K to a single byte using a type
* [NOD-629] Rename variable
* [NOD-629] Rename K to KSize
* [NOD-629] Remove redundant casting
* [NOD-629] Add test for KSize
* [NOD-629] Separate block serialization and db store
* [NOD-629] Make sure K is serialized as uint8
* [NOD-629] Rename KSize to KType
* [NOD-629] Comment for test
* [NOD-629] Change fail message
* [NOD-629] Remove newlines
* [NOD-629] Fix test
* [NOD-629] Do not use maxuint8, but !0 instead
* [NOD-629] Fix test
* [NOD-629] Merge conflict
* [NOD-629] Fix test; Update comment
parent 29bcc271b5
commit 52e0a0967d
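One of the commit-message items, "Do not use maxuint8, but !0 instead", refers to obtaining the maximum value of the new type by flipping all bits of zero rather than hard-coding math.MaxUint8. A minimal standalone sketch of that idiom (the KType alias below is a local stand-in for the type this commit adds to dagconfig, not the package itself):

package main

import "fmt"

// KType stands in for dagconfig.KType: a single unsigned byte holding
// GHOSTDAG's K parameter.
type KType uint8

func main() {
    // ^KType(0) flips every bit of the zero value, producing the largest
    // value the type can represent (255 for uint8). Unlike math.MaxUint8,
    // the expression stays correct even if KType's width ever changes.
    maxK := ^KType(0)
    fmt.Println(maxK) // 255
}

The test added in this commit uses the same expression (maxKType := ^dagconfig.KType(0)) to build the largest possible blue anticone size.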
@@ -6,6 +6,7 @@ package blockdag
 
 import (
     "fmt"
+    "github.com/kaspanet/kaspad/dagconfig"
     "github.com/pkg/errors"
     "math"
     "time"
@@ -79,7 +80,7 @@ type blockNode struct {
 
     // bluesAnticoneSizes is a map holding the set of blues affected by this block and their
     // modified blue anticone size.
-    bluesAnticoneSizes map[daghash.Hash]uint32
+    bluesAnticoneSizes map[daghash.Hash]dagconfig.KType
 
     // hash is the double sha 256 of the block.
     hash *daghash.Hash
@@ -115,7 +116,7 @@ func (dag *BlockDAG) newBlockNode(blockHeader *wire.BlockHeader, parents blockSe
         children:           make(blockSet),
         blueScore:          math.MaxUint64, // Initialized to the max value to avoid collisions with the genesis block
         timestamp:          dag.AdjustedTime().Unix(),
-        bluesAnticoneSizes: make(map[daghash.Hash]uint32),
+        bluesAnticoneSizes: make(map[daghash.Hash]dagconfig.KType),
     }
 
     // blockHeader is nil only for the virtual block
blockdag/blocknode_test.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+package blockdag
+
+import (
+    "github.com/kaspanet/kaspad/dagconfig"
+    "github.com/kaspanet/kaspad/util/daghash"
+    "testing"
+)
+
+// This test is to ensure the size BlueAnticoneSizesSize is serialized to the size of KType.
+// We verify that by serializing and deserializing the block while making sure that we stay within the expected range.
+func TestBlueAnticoneSizesSize(t *testing.T) {
+    dag, teardownFunc, err := DAGSetup("TestBlueAnticoneSizesSize", Config{
+        DAGParams: &dagconfig.SimnetParams,
+    })
+    if err != nil {
+        t.Fatalf("TestBlueAnticoneSizesSize: Failed to setup DAG instance: %s", err)
+    }
+    defer teardownFunc()
+
+    k := dagconfig.KType(0)
+    k--
+
+    if k < dagconfig.KType(0) {
+        t.Fatalf("KType must be unsigned")
+    }
+
+    blockHeader := dagconfig.SimnetParams.GenesisBlock.Header
+    node, _ := dag.newBlockNode(&blockHeader, newSet())
+    hash := daghash.Hash{1}
+    // Setting maxKType to maximum value of KType.
+    // As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType.
+    maxKType := ^dagconfig.KType(0)
+    node.bluesAnticoneSizes[hash] = maxKType
+    serializedNode, _ := serializeBlockNode(node)
+    deserializedNode, _ := dag.deserializeBlockNode(serializedNode)
+    if deserializedNode.bluesAnticoneSizes[hash] != maxKType {
+        t.Fatalf("TestBlueAnticoneSizesSize: BlueAnticoneSize should not change when deserializing. Expected: %v but got %v",
+            maxKType, deserializedNode.bluesAnticoneSizes[hash])
+    }
+}
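The test above hinges on single-byte serialization being lossless for every value a KType can hold. A rough standalone illustration of that property, using a plain bytes.Buffer in place of kaspad's binaryserializer.PutUint8/Uint8 helpers (this sketch is not part of the commit):

package main

import (
    "bytes"
    "fmt"
)

// KType stands in for dagconfig.KType.
type KType uint8

func main() {
    // Write each possible KType value as one byte and read it back.
    // The round trip is exact for all 256 values, including ^KType(0),
    // which is what TestBlueAnticoneSizesSize checks through the full
    // serializeBlockNode/deserializeBlockNode path.
    for v := 0; v <= 255; v++ {
        var buf bytes.Buffer
        buf.WriteByte(byte(KType(v)))
        got, _ := buf.ReadByte()
        if KType(got) != KType(v) {
            fmt.Println("round trip failed for", v)
            return
        }
    }
    fmt.Println("all 256 values round-trip unchanged")
}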
@@ -9,6 +9,7 @@ import (
     "encoding/binary"
     "encoding/json"
     "fmt"
+    "github.com/kaspanet/kaspad/dagconfig"
     "github.com/pkg/errors"
     "io"
     "sync"
@@ -705,13 +706,14 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
         return nil, err
     }
 
-    node.bluesAnticoneSizes = make(map[daghash.Hash]uint32)
+    node.bluesAnticoneSizes = make(map[daghash.Hash]dagconfig.KType)
     for i := uint64(0); i < bluesAnticoneSizesLen; i++ {
         hash := &daghash.Hash{}
         if _, err := io.ReadFull(buffer, hash[:]); err != nil {
             return nil, err
         }
-        node.bluesAnticoneSizes[*hash], err = binaryserializer.Uint32(buffer, byteOrder)
+        bluesAnticoneSize, err := binaryserializer.Uint8(buffer)
+        node.bluesAnticoneSizes[*hash] = dagconfig.KType(bluesAnticoneSize)
         if err != nil {
             return nil, err
         }
@@ -738,20 +740,17 @@ func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error)
     return block, nil
 }
 
-// dbStoreBlockNode stores the block node data into the block
-// index bucket. This overwrites the current entry if there exists one.
-func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
-    // Serialize block data to be stored.
+func serializeBlockNode(node *blockNode) ([]byte, error) {
     w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
     header := node.Header()
     err := header.Serialize(w)
     if err != nil {
-        return err
+        return nil, err
     }
 
     err = w.WriteByte(byte(node.status))
     if err != nil {
-        return err
+        return nil, err
     }
 
     // Because genesis doesn't have selected parent, it's serialized as zero hash
@@ -761,48 +760,55 @@ func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
     }
     _, err = w.Write(selectedParentHash[:])
     if err != nil {
-        return err
+        return nil, err
     }
 
     err = binaryserializer.PutUint64(w, byteOrder, node.blueScore)
     if err != nil {
-        return err
+        return nil, err
     }
 
     err = wire.WriteVarInt(w, uint64(len(node.blues)))
     if err != nil {
-        return err
+        return nil, err
     }
 
     for _, blue := range node.blues {
         _, err = w.Write(blue.hash[:])
         if err != nil {
-            return err
+            return nil, err
         }
     }
 
     err = wire.WriteVarInt(w, uint64(len(node.bluesAnticoneSizes)))
     if err != nil {
-        return err
+        return nil, err
     }
     for blockHash, blueAnticoneSize := range node.bluesAnticoneSizes {
         _, err = w.Write(blockHash[:])
         if err != nil {
-            return err
+            return nil, err
         }
 
-        err = binaryserializer.PutUint32(w, byteOrder, blueAnticoneSize)
+        err = binaryserializer.PutUint8(w, uint8(blueAnticoneSize))
         if err != nil {
-            return err
+            return nil, err
         }
     }
+    return w.Bytes(), nil
+}
 
-    value := w.Bytes()
-
+// dbStoreBlockNode stores the block node data into the block
+// index bucket. This overwrites the current entry if there exists one.
+func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
+    serializedNode, err := serializeBlockNode(node)
+    if err != nil {
+        return err
+    }
     // Write block header data to block index bucket.
     blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
     key := BlockIndexKey(node.hash, node.blueScore)
-    return blockIndexBucket.Put(key, value)
+    return blockIndexBucket.Put(key, serializedNode)
 }
 
 // dbStoreBlock stores the provided block in the database if it is not already
@@ -320,7 +320,7 @@ func TestChainedTransactions(t *testing.T) {
 func TestOrderInDiffFromAcceptanceData(t *testing.T) {
     // Create a new database and DAG instance to run tests against.
     params := dagconfig.SimnetParams
-    params.K = math.MaxUint32
+    params.K = math.MaxUint8
     dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
         DAGParams: &params,
     })
@@ -1,6 +1,7 @@
 package blockdag
 
 import (
+    "github.com/kaspanet/kaspad/dagconfig"
     "github.com/pkg/errors"
     "sort"
 )
@@ -38,8 +39,8 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
     })
 
     for _, blueCandidate := range selectedParentAnticone {
-        candidateBluesAnticoneSizes := make(map[*blockNode]uint32)
-        var candidateAnticoneSize uint32
+        candidateBluesAnticoneSizes := make(map[*blockNode]dagconfig.KType)
+        var candidateAnticoneSize dagconfig.KType
         possiblyBlue := true
 
         // Iterate over all blocks in the blue set of newNode that are not in the past
@@ -108,7 +109,7 @@ func (dag *BlockDAG) ghostdag(newNode *blockNode) (selectedParentAnticone []*blo
 
         // The maximum length of node.blues can be K+1 because
         // it contains the selected parent.
-        if uint32(len(newNode.blues)) == dag.dagParams.K+1 {
+        if dagconfig.KType(len(newNode.blues)) == dag.dagParams.K+1 {
             break
         }
     }
@@ -165,7 +166,7 @@ func (dag *BlockDAG) selectedParentAnticone(node *blockNode) ([]*blockNode, erro
 
 // blueAnticoneSize returns the blue anticone size of 'block' from the worldview of 'context'.
 // Expects 'block' to be in the blue set of 'context'
-func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (uint32, error) {
+func (dag *BlockDAG) blueAnticoneSize(block, context *blockNode) (dagconfig.KType, error) {
     for current := context; current != nil; current = current.selectedParent {
         if blueAnticoneSize, ok := current.bluesAnticoneSizes[*block.hash]; ok {
             return blueAnticoneSize, nil
@@ -24,7 +24,7 @@ func TestGHOSTDAG(t *testing.T) {
     dagParams := dagconfig.SimnetParams
 
     tests := []struct {
-        k            uint32
+        k            dagconfig.KType
         expectedReds []string
         dagData      []*testBlockData
     }{
@@ -6,6 +6,7 @@ package blockdag
 
 import (
     "fmt"
+    "github.com/kaspanet/kaspad/dagconfig"
     "time"
 
     "github.com/kaspanet/kaspad/util"
@@ -208,7 +209,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags)
     // The number K*2 was chosen since in peace times anticone is limited to K blocks,
     // while some red block can make it a bit bigger, but much more than that indicates
     // there might be some problem with the netsync process.
-    if flags&BFIsSync == BFIsSync && uint32(len(dag.orphans)) < dag.dagParams.K*2 {
+    if flags&BFIsSync == BFIsSync && dagconfig.KType(len(dag.orphans)) < dag.dagParams.K*2 {
         log.Debugf("Adding orphan block %s. This is normal part of netsync process", blockHash)
     } else {
         log.Infof("Adding orphan block %s", blockHash)
@@ -82,11 +82,16 @@
     DefinedDeployments
 )
 
+// KType defines the size of GHOSTDAG consensus algorithm K parameter.
+type KType uint8
+
 // Params defines a Kaspa network by its parameters. These parameters may be
 // used by Kaspa applications to differentiate networks as well as addresses
 // and keys for one network from those intended for use on another network.
 type Params struct {
-    K uint32
+    // K defines the K parameter for GHOSTDAG consensus algorithm.
+    // See ghostdag.go for further details.
+    K KType
 
     // Name defines a human-readable identifier for the network.
     Name string