Mirror of https://github.com/kaspanet/kaspad.git (synced 2026-02-22 11:39:15 +00:00)

Compare commits: v0.8.3-dev ... v0.8.1-dev (1 commit)

Commit 16e434aa91
@@ -5,8 +5,10 @@
 package appmessage
 
 import (
+	"encoding/binary"
 	"fmt"
 	"io"
+	"math"
 
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
@@ -21,6 +23,16 @@ const MaxVarIntPayload = 9
 
 // MaxInvPerMsg is the maximum number of inventory vectors that can be in any type of kaspa inv message.
 const MaxInvPerMsg = 1 << 17
 
+var (
+	// littleEndian is a convenience variable since binary.LittleEndian is
+	// quite long.
+	littleEndian = binary.LittleEndian
+
+	// bigEndian is a convenience variable since binary.BigEndian is quite
+	// long.
+	bigEndian = binary.BigEndian
+)
+
 // errNonCanonicalVarInt is the common format string used for non-canonically
 // encoded variable length integer errors.
 var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
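The call sites below thread an explicit byte order into every binaryserializer helper. The binaryserializer package itself is not part of this diff, so the following is only a minimal sketch, assuming the two-argument read form and the (writer, byte order, value) write form used at those call sites:

package binaryserializer

import (
	"encoding/binary"
	"io"
)

// Uint32 reads four bytes from r and decodes them with the given byte order.
func Uint32(r io.Reader, byteOrder binary.ByteOrder) (uint32, error) {
	var buf [4]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, err
	}
	return byteOrder.Uint32(buf[:]), nil
}

// PutUint32 encodes val with the given byte order and writes it to w.
func PutUint32(w io.Writer, byteOrder binary.ByteOrder, val uint32) error {
	var buf [4]byte
	byteOrder.PutUint32(buf[:], val)
	_, err := w.Write(buf[:])
	return err
}

Passing binary.LittleEndian, via the littleEndian convenience variable introduced above, keeps the wire format of these messages little-endian throughout.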
@@ -41,7 +53,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
// type assertions first.
|
||||
switch e := element.(type) {
|
||||
case *int32:
|
||||
rv, err := binaryserializer.Uint32(r)
|
||||
rv, err := binaryserializer.Uint32(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -49,7 +61,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
return nil
|
||||
|
||||
case *uint32:
|
||||
rv, err := binaryserializer.Uint32(r)
|
||||
rv, err := binaryserializer.Uint32(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -57,7 +69,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
return nil
|
||||
|
||||
case *int64:
|
||||
rv, err := binaryserializer.Uint64(r)
|
||||
rv, err := binaryserializer.Uint64(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -65,7 +77,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
return nil
|
||||
|
||||
case *uint64:
|
||||
rv, err := binaryserializer.Uint64(r)
|
||||
rv, err := binaryserializer.Uint64(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -94,7 +106,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
|
||||
// Unix timestamp encoded as an int64.
|
||||
case *int64Time:
|
||||
rv, err := binaryserializer.Uint64(r)
|
||||
rv, err := binaryserializer.Uint64(r, binary.LittleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -111,7 +123,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
|
||||
// Message header command.
|
||||
case *MessageCommand:
|
||||
rv, err := binaryserializer.Uint32(r)
|
||||
rv, err := binaryserializer.Uint32(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -144,7 +156,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
return nil
|
||||
|
||||
case *ServiceFlag:
|
||||
rv, err := binaryserializer.Uint64(r)
|
||||
rv, err := binaryserializer.Uint64(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -152,7 +164,7 @@ func ReadElement(r io.Reader, element interface{}) error {
|
||||
return nil
|
||||
|
||||
case *KaspaNet:
|
||||
rv, err := binaryserializer.Uint32(r)
|
||||
rv, err := binaryserializer.Uint32(r, littleEndian)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -181,28 +193,28 @@ func WriteElement(w io.Writer, element interface{}) error {
|
||||
// type assertions first.
|
||||
switch e := element.(type) {
|
||||
case int32:
|
||||
err := binaryserializer.PutUint32(w, uint32(e))
|
||||
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case uint32:
|
||||
err := binaryserializer.PutUint32(w, e)
|
||||
err := binaryserializer.PutUint32(w, littleEndian, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case int64:
|
||||
err := binaryserializer.PutUint64(w, uint64(e))
|
||||
err := binaryserializer.PutUint64(w, littleEndian, uint64(e))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case uint64:
|
||||
err := binaryserializer.PutUint64(w, e)
|
||||
err := binaryserializer.PutUint64(w, littleEndian, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -237,7 +249,7 @@ func WriteElement(w io.Writer, element interface{}) error {
|
||||
|
||||
// Message header command.
|
||||
case MessageCommand:
|
||||
err := binaryserializer.PutUint32(w, uint32(e))
|
||||
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -269,14 +281,14 @@ func WriteElement(w io.Writer, element interface{}) error {
|
||||
return nil
|
||||
|
||||
case ServiceFlag:
|
||||
err := binaryserializer.PutUint64(w, uint64(e))
|
||||
err := binaryserializer.PutUint64(w, littleEndian, uint64(e))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case KaspaNet:
|
||||
err := binaryserializer.PutUint32(w, uint32(e))
|
||||
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -308,7 +320,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
|
||||
var rv uint64
|
||||
switch discriminant {
|
||||
case 0xff:
|
||||
sv, err := binaryserializer.Uint64(r)
|
||||
sv, err := binaryserializer.Uint64(r, littleEndian)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -323,7 +335,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
|
||||
}
|
||||
|
||||
case 0xfe:
|
||||
sv, err := binaryserializer.Uint32(r)
|
||||
sv, err := binaryserializer.Uint32(r, littleEndian)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -338,7 +350,7 @@ func ReadVarInt(r io.Reader) (uint64, error) {
|
||||
}
|
||||
|
||||
case 0xfd:
|
||||
sv, err := binaryserializer.Uint16(r)
|
||||
sv, err := binaryserializer.Uint16(r, littleEndian)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -358,3 +370,143 @@ func ReadVarInt(r io.Reader) (uint64, error) {
|
||||
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
// WriteVarInt serializes val to w using a variable number of bytes depending
|
||||
// on its value.
|
||||
func WriteVarInt(w io.Writer, val uint64) error {
|
||||
if val < 0xfd {
|
||||
_, err := w.Write([]byte{uint8(val)})
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if val <= math.MaxUint16 {
|
||||
var buf [3]byte
|
||||
buf[0] = 0xfd
|
||||
littleEndian.PutUint16(buf[1:], uint16(val))
|
||||
_, err := w.Write(buf[:])
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if val <= math.MaxUint32 {
|
||||
var buf [5]byte
|
||||
buf[0] = 0xfe
|
||||
littleEndian.PutUint32(buf[1:], uint32(val))
|
||||
_, err := w.Write(buf[:])
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
var buf [9]byte
|
||||
buf[0] = 0xff
|
||||
littleEndian.PutUint64(buf[1:], val)
|
||||
_, err := w.Write(buf[:])
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
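A quick worked illustration of the encoding tiers above; the expected byte sequences match the TestVarIntEncoding vectors further down:

var buf bytes.Buffer
_ = WriteVarInt(&buf, 0xfc)        // appends 1 byte:  fc
_ = WriteVarInt(&buf, 0xfd)        // appends 3 bytes: fd fd 00
_ = WriteVarInt(&buf, 0x10000)     // appends 5 bytes: fe 00 00 01 00
_ = WriteVarInt(&buf, 0x100000000) // appends 9 bytes: ff 00 00 00 00 01 00 00 00
fmt.Printf("% x\n", buf.Bytes())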
// VarIntSerializeSize returns the number of bytes it would take to serialize
|
||||
// val as a variable length integer.
|
||||
func VarIntSerializeSize(val uint64) int {
|
||||
// The value is small enough to be represented by itself, so it's
|
||||
// just 1 byte.
|
||||
if val < 0xfd {
|
||||
return 1
|
||||
}
|
||||
|
||||
// Discriminant 1 byte plus 2 bytes for the uint16.
|
||||
if val <= math.MaxUint16 {
|
||||
return 3
|
||||
}
|
||||
|
||||
// Discriminant 1 byte plus 4 bytes for the uint32.
|
||||
if val <= math.MaxUint32 {
|
||||
return 5
|
||||
}
|
||||
|
||||
// Discriminant 1 byte plus 8 bytes for the uint64.
|
||||
return 9
|
||||
}
|
||||
|
||||
// ReadVarString reads a variable length string from r and returns it as a Go
|
||||
// string. A variable length string is encoded as a variable length integer
|
||||
// containing the length of the string followed by the bytes that represent the
|
||||
// string itself. An error is returned if the length is greater than the
|
||||
// maximum block payload size since it helps protect against memory exhaustion
|
||||
// attacks and forced panics through malformed messages.
|
||||
func ReadVarString(r io.Reader, pver uint32) (string, error) {
|
||||
count, err := ReadVarInt(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Prevent variable length strings that are larger than the maximum
|
||||
// message size. It would be possible to cause memory exhaustion and
|
||||
// panics without a sane upper bound on this count.
|
||||
if count > MaxMessagePayload {
|
||||
str := fmt.Sprintf("variable length string is too long "+
|
||||
"[count %d, max %d]", count, MaxMessagePayload)
|
||||
return "", messageError("ReadVarString", str)
|
||||
}
|
||||
|
||||
buf := make([]byte, count)
|
||||
_, err = io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// WriteVarString serializes str to w as a variable length integer containing
|
||||
// the length of the string followed by the bytes that represent the string
|
||||
// itself.
|
||||
func WriteVarString(w io.Writer, str string) error {
|
||||
err := WriteVarInt(w, uint64(len(str)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write([]byte(str))
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadVarBytes reads a variable length byte array. A byte array is encoded
|
||||
// as a varInt containing the length of the array followed by the bytes
|
||||
// themselves. An error is returned if the length is greater than the
|
||||
// passed maxAllowed parameter which helps protect against memory exhaustion
|
||||
// attacks and forced panics through malformed messages. The fieldName
|
||||
// parameter is only used for the error message so it provides more context in
|
||||
// the error.
|
||||
func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
|
||||
fieldName string) ([]byte, error) {
|
||||
|
||||
count, err := ReadVarInt(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prevent byte array larger than the max message size. It would
|
||||
// be possible to cause memory exhaustion and panics without a sane
|
||||
// upper bound on this count.
|
||||
if count > uint64(maxAllowed) {
|
||||
str := fmt.Sprintf("%s is larger than the max allowed size "+
|
||||
"[count %d, max %d]", fieldName, count, maxAllowed)
|
||||
return nil, messageError("ReadVarBytes", str)
|
||||
}
|
||||
|
||||
b := make([]byte, count)
|
||||
_, err = io.ReadFull(r, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// WriteVarBytes serializes a variable length byte array to w as a varInt
|
||||
// containing the number of bytes, followed by the bytes themselves.
|
||||
func WriteVarBytes(w io.Writer, pver uint32, bytes []byte) error {
|
||||
slen := uint64(len(bytes))
|
||||
err := WriteVarInt(w, slen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.Write(bytes)
|
||||
return err
|
||||
}
|
||||
|
||||
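For context, a minimal round-trip through the variable-length string helpers above might look like this; an illustrative sketch only, with error handling abbreviated:

var buf bytes.Buffer
if err := WriteVarString(&buf, "Test"); err != nil {
	// handle the write error
}
// buf now holds 0x04 (the varint length) followed by the bytes of "Test".

str, err := ReadVarString(bytes.NewReader(buf.Bytes()), ProtocolVersion)
if err != nil {
	// handle the read error
}
// str == "Test"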
@@ -8,6 +8,7 @@ import (
|
||||
"bytes"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
@@ -232,3 +233,464 @@ func TestElementEncodingErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarIntEncoding tests appmessage encode and decode for variable length integers.
|
||||
func TestVarIntEncoding(t *testing.T) {
|
||||
tests := []struct {
|
||||
value uint64 // Value to encode
|
||||
buf []byte // Encoded value
|
||||
}{
|
||||
// Latest protocol version.
|
||||
// Single byte
|
||||
{0, []byte{0x00}},
|
||||
// Max single byte
|
||||
{0xfc, []byte{0xfc}},
|
||||
// Min 2-byte
|
||||
{0xfd, []byte{0xfd, 0x0fd, 0x00}},
|
||||
// Max 2-byte
|
||||
{0xffff, []byte{0xfd, 0xff, 0xff}},
|
||||
// Min 4-byte
|
||||
{0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
|
||||
// Max 4-byte
|
||||
{0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
|
||||
// Min 8-byte
|
||||
{
|
||||
0x100000000,
|
||||
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
|
||||
},
|
||||
// Max 8-byte
|
||||
{
|
||||
0xffffffffffffffff,
|
||||
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Encode to appmessage format.
|
||||
buf := &bytes.Buffer{}
|
||||
err := WriteVarInt(buf, test.value)
|
||||
if err != nil {
|
||||
t.Errorf("WriteVarInt #%d error %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), test.buf) {
|
||||
t.Errorf("WriteVarInt #%d\n got: %s want: %s", i,
|
||||
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
|
||||
continue
|
||||
}
|
||||
|
||||
// Decode from appmessage format.
|
||||
rbuf := bytes.NewReader(test.buf)
|
||||
val, err := ReadVarInt(rbuf)
|
||||
if err != nil {
|
||||
t.Errorf("ReadVarInt #%d error %v", i, err)
|
||||
continue
|
||||
}
|
||||
if val != test.value {
|
||||
t.Errorf("ReadVarInt #%d\n got: %x want: %x", i,
|
||||
val, test.value)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarIntEncodingErrors performs negative tests against appmessage encode and decode
|
||||
// of variable length integers to confirm error paths work correctly.
|
||||
func TestVarIntEncodingErrors(t *testing.T) {
|
||||
tests := []struct {
|
||||
in uint64 // Value to encode
|
||||
buf []byte // Encoded value
|
||||
max int // Max size of fixed buffer to induce errors
|
||||
writeErr error // Expected write error
|
||||
readErr error // Expected read error
|
||||
}{
|
||||
// Force errors on discriminant.
|
||||
{0, []byte{0x00}, 0, io.ErrShortWrite, io.EOF},
|
||||
// Force errors on 2-byte read/write.
|
||||
{0xfd, []byte{0xfd}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
|
||||
{0xfd, []byte{0xfd}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
|
||||
// Force errors on 4-byte read/write.
|
||||
{0x10000, []byte{0xfe}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
|
||||
{0x10000, []byte{0xfe}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
|
||||
// Force errors on 8-byte read/write.
|
||||
{0x100000000, []byte{0xff}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
|
||||
{0x100000000, []byte{0xff}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Encode to appmessage format.
|
||||
w := newFixedWriter(test.max)
|
||||
err := WriteVarInt(w, test.in)
|
||||
if !errors.Is(err, test.writeErr) {
|
||||
t.Errorf("WriteVarInt #%d wrong error got: %v, want: %v",
|
||||
i, err, test.writeErr)
|
||||
continue
|
||||
}
|
||||
|
||||
// Decode from appmessage format.
|
||||
r := newFixedReader(test.max, test.buf)
|
||||
_, err = ReadVarInt(r)
|
||||
if !errors.Is(err, test.readErr) {
|
||||
t.Errorf("ReadVarInt #%d wrong error got: %v, want: %v",
|
||||
i, err, test.readErr)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarIntNonCanonical ensures variable length integers that are not encoded
|
||||
// canonically return the expected error.
|
||||
func TestVarIntNonCanonical(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
tests := []struct {
|
||||
name string // Test name for easier identification
|
||||
in []byte // Value to decode
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
}{
|
||||
{
|
||||
"0 encoded with 3 bytes", []byte{0xfd, 0x00, 0x00},
|
||||
pver,
|
||||
},
|
||||
{
|
||||
"max single-byte value encoded with 3 bytes",
|
||||
[]byte{0xfd, 0xfc, 0x00}, pver,
|
||||
},
|
||||
{
|
||||
"0 encoded with 5 bytes",
|
||||
[]byte{0xfe, 0x00, 0x00, 0x00, 0x00}, pver,
|
||||
},
|
||||
{
|
||||
"max three-byte value encoded with 5 bytes",
|
||||
[]byte{0xfe, 0xff, 0xff, 0x00, 0x00}, pver,
|
||||
},
|
||||
{
|
||||
"0 encoded with 9 bytes",
|
||||
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
pver,
|
||||
},
|
||||
{
|
||||
"max five-byte value encoded with 9 bytes",
|
||||
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00},
|
||||
pver,
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Decode from appmessage format.
|
||||
rbuf := bytes.NewReader(test.in)
|
||||
val, err := ReadVarInt(rbuf)
|
||||
if msgErr := &(MessageError{}); !errors.As(err, &msgErr) {
|
||||
t.Errorf("ReadVarInt #%d (%s) unexpected error %v", i,
|
||||
test.name, err)
|
||||
continue
|
||||
}
|
||||
if val != 0 {
|
||||
t.Errorf("ReadVarInt #%d (%s)\n got: %d want: 0", i,
|
||||
test.name, val)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarIntEncoding tests the serialize size for variable length integers.
|
||||
func TestVarIntSerializeSize(t *testing.T) {
|
||||
tests := []struct {
|
||||
val uint64 // Value to get the serialized size for
|
||||
size int // Expected serialized size
|
||||
}{
|
||||
// Single byte
|
||||
{0, 1},
|
||||
// Max single byte
|
||||
{0xfc, 1},
|
||||
// Min 2-byte
|
||||
{0xfd, 3},
|
||||
// Max 2-byte
|
||||
{0xffff, 3},
|
||||
// Min 4-byte
|
||||
{0x10000, 5},
|
||||
// Max 4-byte
|
||||
{0xffffffff, 5},
|
||||
// Min 8-byte
|
||||
{0x100000000, 9},
|
||||
// Max 8-byte
|
||||
{0xffffffffffffffff, 9},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
serializedSize := VarIntSerializeSize(test.val)
|
||||
if serializedSize != test.size {
|
||||
t.Errorf("VarIntSerializeSize #%d got: %d, want: %d", i,
|
||||
serializedSize, test.size)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarStringEncoding tests appmessage encode and decode for variable length strings.
|
||||
func TestVarStringEncoding(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
// str256 is a string that takes a 2-byte varint to encode.
|
||||
str256 := strings.Repeat("test", 64)
|
||||
|
||||
tests := []struct {
|
||||
in string // String to encode
|
||||
out string // String to decoded value
|
||||
buf []byte // Encoded value
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
}{
|
||||
// Latest protocol version.
|
||||
// Empty string
|
||||
{"", "", []byte{0x00}, pver},
|
||||
// Single byte varint + string
|
||||
{"Test", "Test", append([]byte{0x04}, []byte("Test")...), pver},
|
||||
// 2-byte varint + string
|
||||
{str256, str256, append([]byte{0xfd, 0x00, 0x01}, []byte(str256)...), pver},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Encode to appmessage format.
|
||||
var buf bytes.Buffer
|
||||
err := WriteVarString(&buf, test.in)
|
||||
if err != nil {
|
||||
t.Errorf("WriteVarString #%d error %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), test.buf) {
|
||||
t.Errorf("WriteVarString #%d\n got: %s want: %s", i,
|
||||
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
|
||||
continue
|
||||
}
|
||||
|
||||
// Decode from appmessage format.
|
||||
rbuf := bytes.NewReader(test.buf)
|
||||
val, err := ReadVarString(rbuf, test.pver)
|
||||
if err != nil {
|
||||
t.Errorf("ReadVarString #%d error %v", i, err)
|
||||
continue
|
||||
}
|
||||
if val != test.out {
|
||||
t.Errorf("ReadVarString #%d\n got: %s want: %s", i,
|
||||
val, test.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarStringEncodingErrors performs negative tests against appmessage encode and
|
||||
// decode of variable length strings to confirm error paths work correctly.
|
||||
func TestVarStringEncodingErrors(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
// str256 is a string that takes a 2-byte varint to encode.
|
||||
str256 := strings.Repeat("test", 64)
|
||||
|
||||
tests := []struct {
|
||||
in string // Value to encode
|
||||
buf []byte // Encoded value
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
max int // Max size of fixed buffer to induce errors
|
||||
writeErr error // Expected write error
|
||||
readErr error // Expected read error
|
||||
}{
|
||||
// Latest protocol version with intentional read/write errors.
|
||||
// Force errors on empty string.
|
||||
{"", []byte{0x00}, pver, 0, io.ErrShortWrite, io.EOF},
|
||||
// Force error on single byte varint + string.
|
||||
{"Test", []byte{0x04}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
|
||||
// Force errors on 2-byte varint + string.
|
||||
{str256, []byte{0xfd}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Encode to appmessage format.
|
||||
w := newFixedWriter(test.max)
|
||||
err := WriteVarString(w, test.in)
|
||||
if !errors.Is(err, test.writeErr) {
|
||||
t.Errorf("WriteVarString #%d wrong error got: %v, want: %v",
|
||||
i, err, test.writeErr)
|
||||
continue
|
||||
}
|
||||
|
||||
// Decode from appmessage format.
|
||||
r := newFixedReader(test.max, test.buf)
|
||||
_, err = ReadVarString(r, test.pver)
|
||||
if !errors.Is(err, test.readErr) {
|
||||
t.Errorf("ReadVarString #%d wrong error got: %v, want: %v",
|
||||
i, err, test.readErr)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarStringOverflowErrors performs tests to ensure deserializing variable
|
||||
// length strings intentionally crafted to use large values for the string
|
||||
// length are handled properly. This could otherwise potentially be used as an
|
||||
// attack vector.
|
||||
func TestVarStringOverflowErrors(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
tests := []struct {
|
||||
buf []byte // Encoded value
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
err error // Expected error
|
||||
}{
|
||||
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
|
||||
pver, &MessageError{}},
|
||||
{[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
pver, &MessageError{}},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Decode from appmessage format.
|
||||
rbuf := bytes.NewReader(test.buf)
|
||||
_, err := ReadVarString(rbuf, test.pver)
|
||||
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
|
||||
t.Errorf("ReadVarString #%d wrong error got: %v, "+
|
||||
"want: %v", i, err, reflect.TypeOf(test.err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestVarBytesEncoding tests appmessage encode and decode for variable length byte array.
|
||||
func TestVarBytesEncoding(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
// bytes256 is a byte array that takes a 2-byte varint to encode.
|
||||
bytes256 := bytes.Repeat([]byte{0x01}, 256)
|
||||
|
||||
tests := []struct {
|
||||
in []byte // Byte Array to write
|
||||
buf []byte // Encoded value
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
}{
|
||||
// Latest protocol version.
|
||||
// Empty byte array
|
||||
{[]byte{}, []byte{0x00}, pver},
|
||||
// Single byte varint + byte array
|
||||
{[]byte{0x01}, []byte{0x01, 0x01}, pver},
|
||||
// 2-byte varint + byte array
|
||||
{bytes256, append([]byte{0xfd, 0x00, 0x01}, bytes256...), pver},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Encode to appmessage format.
|
||||
var buf bytes.Buffer
|
||||
err := WriteVarBytes(&buf, test.pver, test.in)
|
||||
if err != nil {
|
||||
t.Errorf("WriteVarBytes #%d error %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), test.buf) {
|
||||
t.Errorf("WriteVarBytes #%d\n got: %s want: %s", i,
|
||||
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
|
||||
continue
|
||||
}
|
||||
|
||||
// Decode from appmessage format.
|
||||
rbuf := bytes.NewReader(test.buf)
|
||||
val, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
|
||||
"test payload")
|
||||
if err != nil {
|
||||
t.Errorf("ReadVarBytes #%d error %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), test.buf) {
|
||||
t.Errorf("ReadVarBytes #%d\n got: %s want: %s", i,
|
||||
val, test.buf)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarBytesEncodingErrors performs negative tests against appmessage encode and
|
||||
// decode of variable length byte arrays to confirm error paths work correctly.
|
||||
func TestVarBytesEncodingErrors(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
// bytes256 is a byte array that takes a 2-byte varint to encode.
|
||||
bytes256 := bytes.Repeat([]byte{0x01}, 256)
|
||||
|
||||
tests := []struct {
|
||||
in []byte // Byte Array to write
|
||||
buf []byte // Encoded value
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
max int // Max size of fixed buffer to induce errors
|
||||
writeErr error // Expected write error
|
||||
readErr error // Expected read error
|
||||
}{
|
||||
// Latest protocol version with intentional read/write errors.
|
||||
// Force errors on empty byte array.
|
||||
{[]byte{}, []byte{0x00}, pver, 0, io.ErrShortWrite, io.EOF},
|
||||
// Force error on single byte varint + byte array.
|
||||
{[]byte{0x01, 0x02, 0x03}, []byte{0x04}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
|
||||
// Force errors on 2-byte varint + byte array.
|
||||
{bytes256, []byte{0xfd}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Encode to appmessage format.
|
||||
w := newFixedWriter(test.max)
|
||||
err := WriteVarBytes(w, test.pver, test.in)
|
||||
if !errors.Is(err, test.writeErr) {
|
||||
t.Errorf("WriteVarBytes #%d wrong error got: %v, want: %v",
|
||||
i, err, test.writeErr)
|
||||
continue
|
||||
}
|
||||
|
||||
// Decode from appmessage format.
|
||||
r := newFixedReader(test.max, test.buf)
|
||||
_, err = ReadVarBytes(r, test.pver, MaxMessagePayload,
|
||||
"test payload")
|
||||
if !errors.Is(err, test.readErr) {
|
||||
t.Errorf("ReadVarBytes #%d wrong error got: %v, want: %v",
|
||||
i, err, test.readErr)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarBytesOverflowErrors performs tests to ensure deserializing variable
|
||||
// length byte arrays intentionally crafted to use large values for the array
|
||||
// length are handled properly. This could otherwise potentially be used as an
|
||||
// attack vector.
|
||||
func TestVarBytesOverflowErrors(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
tests := []struct {
|
||||
buf []byte // Encoded value
|
||||
pver uint32 // Protocol version for appmessage encoding
|
||||
err error // Expected error
|
||||
}{
|
||||
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
|
||||
pver, &MessageError{}},
|
||||
{[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
pver, &MessageError{}},
|
||||
}
|
||||
|
||||
t.Logf("Running %d tests", len(tests))
|
||||
for i, test := range tests {
|
||||
// Decode from appmessage format.
|
||||
rbuf := bytes.NewReader(test.buf)
|
||||
_, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
|
||||
"test payload")
|
||||
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
|
||||
t.Errorf("ReadVarBytes #%d wrong error got: %v, "+
|
||||
"want: %v", i, err, reflect.TypeOf(test.err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -41,6 +41,8 @@ const (
 	CmdPong
 	CmdRequestBlockLocator
 	CmdBlockLocator
+	CmdSelectedTip
+	CmdRequestSelectedTip
 	CmdInvRelayBlock
 	CmdRequestRelayBlocks
 	CmdInvTransaction
@@ -121,6 +123,8 @@ var ProtocolMessageCommandToString = map[MessageCommand]string{
 	CmdPong:                "Pong",
 	CmdRequestBlockLocator: "RequestBlockLocator",
 	CmdBlockLocator:        "BlockLocator",
+	CmdSelectedTip:         "SelectedTip",
+	CmdRequestSelectedTip:  "RequestSelectedTip",
 	CmdInvRelayBlock:       "InvRelayBlock",
 	CmdRequestRelayBlocks:  "RequestRelayBlocks",
 	CmdInvTransaction:      "InvTransaction",

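Inserting CmdSelectedTip and CmdRequestSelectedTip shifts every later command value up by two, which is why the MsgIBDBlock test below changes wantCmd from MessageCommand(15) to MessageCommand(17). Assuming the commands are declared with iota, as the bare identifiers in the const block suggest, a toy model of the effect:

type MessageCommand uint32

const (
	CmdBlockLocator MessageCommand = iota // 0 in this toy block
	CmdSelectedTip                        // 1 (newly inserted)
	CmdRequestSelectedTip                 // 2 (newly inserted)
	CmdInvRelayBlock                      // 3 (was 1 before the insertion)
)

The real values differ, but the relative shift is the same: every command declared after the insertion point moves up by two.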
@@ -7,7 +7,7 @@ package appmessage
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
@@ -73,7 +73,7 @@ func (h *MsgBlockHeader) NumParentBlocks() byte {
|
||||
|
||||
// BlockHash computes the block identifier hash for the given block header.
|
||||
func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
|
||||
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
|
||||
return consensusserialization.HeaderHash(BlockHeaderToDomainBlockHeader(h))
|
||||
}
|
||||
|
||||
// IsGenesis returns true iff this block is a genesis block
|
||||
|
||||
@@ -25,7 +25,7 @@ func TestIBDBlock(t *testing.T) {
|
||||
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
|
||||
|
||||
// Ensure the command is expected value.
|
||||
wantCmd := MessageCommand(15)
|
||||
wantCmd := MessageCommand(17)
|
||||
msg := NewMsgIBDBlock(NewMsgBlock(bh))
|
||||
if cmd := msg.Command(); cmd != wantCmd {
|
||||
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
|
||||
|
||||
@@ -5,14 +5,13 @@ import (
 )
 
 // MsgRequestBlockLocator implements the Message interface and represents a kaspa
-// RequestBlockLocator message. It is used to request a block locator between low
-// and high hash.
+// RequestBlockLocator message. It is used to request a block locator between high
+// and low hash.
 // The locator is returned via a locator message (MsgBlockLocator).
 type MsgRequestBlockLocator struct {
 	baseMessage
-	LowHash  *externalapi.DomainHash
 	HighHash *externalapi.DomainHash
-	Limit    uint32
+	LowHash  *externalapi.DomainHash
 }
 
 // Command returns the protocol command string for the message. This is part
@@ -24,10 +23,9 @@ func (msg *MsgRequestBlockLocator) Command() MessageCommand {
 // NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
 // Message interface using the passed parameters and defaults for the remaining
 // fields.
-func NewMsgRequestBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
+func NewMsgRequestBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestBlockLocator {
 	return &MsgRequestBlockLocator{
-		LowHash:  lowHash,
 		HighHash: highHash,
-		Limit:    limit,
+		LowHash:  lowHash,
 	}
 }

@@ -18,7 +18,7 @@ func TestRequestBlockLocator(t *testing.T) {
|
||||
|
||||
// Ensure the command is expected value.
|
||||
wantCmd := MessageCommand(9)
|
||||
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{}, 0)
|
||||
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{})
|
||||
if cmd := msg.Command(); cmd != wantCmd {
|
||||
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
|
||||
cmd, wantCmd)
|
||||
|
||||
@@ -4,6 +4,10 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
// MaxRequestIBDBlocksHashes is the maximum number of hashes that can
|
||||
// be in a single RequestIBDBlocks message.
|
||||
const MaxRequestIBDBlocksHashes = MaxInvPerMsg
|
||||
|
||||
// MsgRequestIBDBlocks implements the Message interface and represents a kaspa
|
||||
// RequestIBDBlocks message. It is used to request blocks as part of the IBD
|
||||
// protocol.
|
||||
|
||||
21 app/appmessage/p2p_msgrequestselectedtip.go Normal file
@@ -0,0 +1,21 @@
|
||||
package appmessage
|
||||
|
||||
// MsgRequestSelectedTip implements the Message interface and represents a kaspa
|
||||
// RequestSelectedTip message. It is used to request the selected tip of another peer.
|
||||
//
|
||||
// This message has no payload.
|
||||
type MsgRequestSelectedTip struct {
|
||||
baseMessage
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message. This is part
|
||||
// of the Message interface implementation.
|
||||
func (msg *MsgRequestSelectedTip) Command() MessageCommand {
|
||||
return CmdRequestSelectedTip
|
||||
}
|
||||
|
||||
// NewMsgRequestSelectedTip returns a new kaspa RequestSelectedTip message that conforms to the
|
||||
// Message interface.
|
||||
func NewMsgRequestSelectedTip() *MsgRequestSelectedTip {
|
||||
return &MsgRequestSelectedTip{}
|
||||
}
|
||||
20 app/appmessage/p2p_msgrequestselectedtip_test.go Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) 2013-2016 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package appmessage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRequestSelectedTip tests the MsgRequestSelectedTip API.
|
||||
func TestRequestSelectedTip(t *testing.T) {
|
||||
// Ensure the command is expected value.
|
||||
wantCmd := MessageCommand(12)
|
||||
msg := NewMsgRequestSelectedTip()
|
||||
if cmd := msg.Command(); cmd != wantCmd {
|
||||
t.Errorf("NewMsgRequestSelectedTip: wrong command - got %v want %v",
|
||||
cmd, wantCmd)
|
||||
}
|
||||
}
|
||||
28 app/appmessage/p2p_msgselectedtip.go Normal file
@@ -0,0 +1,28 @@
|
||||
package appmessage
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
// MsgSelectedTip implements the Message interface and represents a kaspa
|
||||
// selectedtip message. It is used to answer getseltip messages and tell
|
||||
// the asking peer what is the selected tip of this peer.
|
||||
type MsgSelectedTip struct {
|
||||
baseMessage
|
||||
// The selected tip hash of the generator of the message.
|
||||
SelectedTipHash *externalapi.DomainHash
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message. This is part
|
||||
// of the Message interface implementation.
|
||||
func (msg *MsgSelectedTip) Command() MessageCommand {
|
||||
return CmdSelectedTip
|
||||
}
|
||||
|
||||
// NewMsgSelectedTip returns a new kaspa selectedtip message that conforms to the
|
||||
// Message interface.
|
||||
func NewMsgSelectedTip(selectedTipHash *externalapi.DomainHash) *MsgSelectedTip {
|
||||
return &MsgSelectedTip{
|
||||
SelectedTipHash: selectedTipHash,
|
||||
}
|
||||
}
|
||||
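Together the two new types form a simple request/response pair: a peer sends MsgRequestSelectedTip (no payload) and expects a MsgSelectedTip carrying the responder's selected tip hash back. A hypothetical helper, not part of this diff, showing how the pieces fit:

// buildSelectedTipResponse answers an incoming MsgRequestSelectedTip with this
// node's own selected tip hash. Hypothetical glue code for illustration only;
// the actual flow handlers live elsewhere in the protocol package.
func buildSelectedTipResponse(_ *MsgRequestSelectedTip, ownSelectedTip *externalapi.DomainHash) *MsgSelectedTip {
	return NewMsgSelectedTip(ownSelectedTip)
}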
18 app/appmessage/p2p_msgselectedtip_test.go Normal file
@@ -0,0 +1,18 @@
|
||||
package appmessage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
// TestSelectedTip tests the MsgSelectedTip API.
|
||||
func TestSelectedTip(t *testing.T) {
|
||||
// Ensure the command is expected value.
|
||||
wantCmd := MessageCommand(11)
|
||||
msg := NewMsgSelectedTip(&externalapi.DomainHash{})
|
||||
if cmd := msg.Command(); cmd != wantCmd {
|
||||
t.Errorf("NewMsgSelectedTip: wrong command - got %v want %v",
|
||||
cmd, wantCmd)
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
|
||||
@@ -162,12 +162,12 @@ func (msg *MsgTx) IsCoinBase() bool {
|
||||
|
||||
// TxHash generates the Hash for the transaction.
|
||||
func (msg *MsgTx) TxHash() *externalapi.DomainHash {
|
||||
return consensushashing.TransactionHash(MsgTxToDomainTransaction(msg))
|
||||
return consensusserialization.TransactionHash(MsgTxToDomainTransaction(msg))
|
||||
}
|
||||
|
||||
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
|
||||
func (msg *MsgTx) TxID() *externalapi.DomainTransactionID {
|
||||
return consensushashing.TransactionID(MsgTxToDomainTransaction(msg))
|
||||
return consensusserialization.TransactionID(MsgTxToDomainTransaction(msg))
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of a transaction so that the original does not get
|
||||
|
||||
@@ -53,6 +53,9 @@ type MsgVersion struct {
|
||||
// on the appmessage. This has a max length of MaxUserAgentLen.
|
||||
UserAgent string
|
||||
|
||||
// The selected tip hash of the generator of the version message.
|
||||
SelectedTipHash *externalapi.DomainHash
|
||||
|
||||
// Don't announce transactions to peer.
|
||||
DisableRelayTx bool
|
||||
|
||||
@@ -82,7 +85,7 @@ func (msg *MsgVersion) Command() MessageCommand {
|
||||
// Message interface using the passed parameters and defaults for the remaining
|
||||
// fields.
|
||||
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
|
||||
subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
|
||||
selectedTipHash *externalapi.DomainHash, subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
|
||||
|
||||
// Limit the timestamp to one millisecond precision since the protocol
|
||||
// doesn't support better.
|
||||
@@ -94,6 +97,7 @@ func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
|
||||
Address: addr,
|
||||
ID: id,
|
||||
UserAgent: DefaultUserAgent,
|
||||
SelectedTipHash: selectedTipHash,
|
||||
DisableRelayTx: false,
|
||||
SubnetworkID: subnetworkID,
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
|
||||
)
|
||||
|
||||
@@ -18,6 +19,7 @@ func TestVersion(t *testing.T) {
|
||||
pver := ProtocolVersion
|
||||
|
||||
// Create version message data.
|
||||
selectedTipHash := &externalapi.DomainHash{12, 34}
|
||||
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
|
||||
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
|
||||
generatedID, err := id.GenerateID()
|
||||
@@ -26,7 +28,7 @@ func TestVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
// Ensure we get the correct data back out.
|
||||
msg := NewMsgVersion(me, generatedID, "mainnet", nil)
|
||||
msg := NewMsgVersion(me, generatedID, "mainnet", selectedTipHash, nil)
|
||||
if msg.ProtocolVersion != pver {
|
||||
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
|
||||
msg.ProtocolVersion, pver)
|
||||
@@ -43,6 +45,10 @@ func TestVersion(t *testing.T) {
|
||||
t.Errorf("NewMsgVersion: wrong user agent - got %v, want %v",
|
||||
msg.UserAgent, DefaultUserAgent)
|
||||
}
|
||||
if *msg.SelectedTipHash != *selectedTipHash {
|
||||
t.Errorf("NewMsgVersion: wrong selected tip hash - got %s, want %s",
|
||||
msg.SelectedTipHash, selectedTipHash)
|
||||
}
|
||||
if msg.DisableRelayTx {
|
||||
t.Errorf("NewMsgVersion: disable relay tx is not false by "+
|
||||
"default - got %v, want %v", msg.DisableRelayTx, false)
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
package appmessage
|
||||
|
||||
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
|
||||
// GetBlockCountRequestMessage is an appmessage corresponding to
|
||||
// its respective RPC message
|
||||
type GetBlockCountRequestMessage struct {
|
||||
@@ -22,8 +20,7 @@ func NewGetBlockCountRequestMessage() *GetBlockCountRequestMessage {
|
||||
// its respective RPC message
|
||||
type GetBlockCountResponseMessage struct {
|
||||
baseMessage
|
||||
BlockCount uint64
|
||||
HeaderCount uint64
|
||||
BlockCount uint64
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
@@ -34,9 +31,8 @@ func (msg *GetBlockCountResponseMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetBlockCountResponseMessage returns a instance of the message
|
||||
func NewGetBlockCountResponseMessage(syncInfo *externalapi.SyncInfo) *GetBlockCountResponseMessage {
|
||||
func NewGetBlockCountResponseMessage(blockCount uint64) *GetBlockCountResponseMessage {
|
||||
return &GetBlockCountResponseMessage{
|
||||
BlockCount: syncInfo.BlockCount,
|
||||
HeaderCount: syncInfo.HeaderCount,
|
||||
BlockCount: blockCount,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ type GetBlockDAGInfoResponseMessage struct {
|
||||
baseMessage
|
||||
NetworkName string
|
||||
BlockCount uint64
|
||||
HeaderCount uint64
|
||||
TipHashes []string
|
||||
VirtualParentHashes []string
|
||||
Difficulty float64
|
||||
|
||||
@@ -41,6 +41,8 @@ type GetConnectedPeerInfoMessage struct {
|
||||
ID string
|
||||
Address string
|
||||
LastPingDuration int64
|
||||
SelectedTipHash string
|
||||
IsSyncNode bool
|
||||
IsOutbound bool
|
||||
TimeOffset int64
|
||||
UserAgent string
|
||||
|
||||
@@ -49,8 +49,8 @@ type ChainBlock struct {
|
||||
|
||||
// AcceptedBlock represents a block accepted into the DAG
|
||||
type AcceptedBlock struct {
|
||||
Hash string
|
||||
AcceptedTransactionIDs []string
|
||||
Hash string
|
||||
AcceptedTxIDs []string
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
|
||||
@@ -140,7 +140,9 @@ func (a *ComponentManager) maybeSeedFromDNS() {
|
||||
// source. So we'll take first returned address as source.
|
||||
a.addressManager.AddAddresses(addresses...)
|
||||
})
|
||||
}
|
||||
|
||||
if a.cfg.GRPCSeed != "" {
|
||||
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, appmessage.SFNodeNetwork, false, nil,
|
||||
func(addresses []*appmessage.NetAddress) {
|
||||
a.addressManager.AddAddresses(addresses...)
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
package flowcontext
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/blocklogger"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/pkg/errors"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
||||
@@ -18,25 +17,18 @@ import (
|
||||
// relays newly unorphaned transactions and possibly rebroadcast
|
||||
// manually added transactions when not in IBD.
|
||||
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error {
|
||||
hash := consensushashing.BlockHash(block)
|
||||
log.Debugf("OnNewBlock start for block %s", hash)
|
||||
defer log.Debugf("OnNewBlock end for block %s", hash)
|
||||
unorphanedBlocks, err := f.UnorphanBlocks(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks))
|
||||
|
||||
newBlocks := append([]*externalapi.DomainBlock{block}, unorphanedBlocks...)
|
||||
for _, newBlock := range newBlocks {
|
||||
blocklogger.LogBlock(block)
|
||||
|
||||
log.Tracef("OnNewBlock: passing block %s transactions to mining manager", hash)
|
||||
_ = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
|
||||
|
||||
if f.onBlockAddedToDAGHandler != nil {
|
||||
log.Tracef("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
|
||||
err := f.onBlockAddedToDAGHandler(newBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -53,7 +45,7 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
||||
f.updateTransactionsToRebroadcast(block)
|
||||
|
||||
// Don't relay transactions when in IBD.
|
||||
if f.IsIBDRunning() {
|
||||
if atomic.LoadUint32(&f.isInIBD) != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -64,7 +56,7 @@ func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
|
||||
|
||||
txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
|
||||
for i, tx := range transactionsAcceptedToMempool {
|
||||
txIDsToBroadcast[i] = consensushashing.TransactionID(tx)
|
||||
txIDsToBroadcast[i] = consensusserialization.TransactionID(tx)
|
||||
}
|
||||
offset := len(transactionsAcceptedToMempool)
|
||||
for i, txID := range txIDsToRebroadcast {
|
||||
@@ -89,10 +81,10 @@ func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks
|
||||
|
||||
// AddBlock adds the given block to the DAG and propagates it.
|
||||
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
|
||||
_, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
err := f.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||
log.Infof("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
|
||||
log.Infof("Validation failed for block %s: %s", consensusserialization.BlockHash(block), err)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
@@ -101,29 +93,5 @@ func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Broadcast(appmessage.NewMsgInvBlock(consensushashing.BlockHash(block)))
|
||||
}
|
||||
|
||||
// IsIBDRunning returns true if IBD is currently marked as running
|
||||
func (f *FlowContext) IsIBDRunning() bool {
|
||||
return atomic.LoadUint32(&f.isInIBD) != 0
|
||||
}
|
||||
|
||||
// TrySetIBDRunning attempts to set `isInIBD`. Returns false
|
||||
// if it is already set
|
||||
func (f *FlowContext) TrySetIBDRunning() bool {
|
||||
succeeded := atomic.CompareAndSwapUint32(&f.isInIBD, 0, 1)
|
||||
if succeeded {
|
||||
log.Infof("IBD started")
|
||||
}
|
||||
return succeeded
|
||||
}
|
||||
|
||||
// UnsetIBDRunning unsets isInIBD
|
||||
func (f *FlowContext) UnsetIBDRunning() {
|
||||
succeeded := atomic.CompareAndSwapUint32(&f.isInIBD, 1, 0)
|
||||
if !succeeded {
|
||||
panic("attempted to unset isInIBD when it was not set to begin with")
|
||||
}
|
||||
log.Infof("IBD finished")
|
||||
return f.Broadcast(appmessage.NewMsgInvBlock(consensusserialization.BlockHash(block)))
|
||||
}
|
||||
|
||||
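Both the helpers removed above and the new ibd.go below guard IBD with a single atomic uint32 flag. Reduced to a self-contained sketch, the pattern looks like this:

package main

import (
	"fmt"
	"sync/atomic"
)

type ibdState struct {
	isInIBD uint32
}

// tryStart flips the flag from 0 to 1 and fails if IBD is already running,
// mirroring TrySetIBDRunning's CompareAndSwapUint32.
func (s *ibdState) tryStart() bool {
	return atomic.CompareAndSwapUint32(&s.isInIBD, 0, 1)
}

// finish clears the flag, mirroring FinishIBD's StoreUint32.
func (s *ibdState) finish() {
	atomic.StoreUint32(&s.isInIBD, 0)
}

// running reports whether IBD is currently marked as running.
func (s *ibdState) running() bool {
	return atomic.LoadUint32(&s.isInIBD) != 0
}

func main() {
	var s ibdState
	fmt.Println(s.tryStart()) // true: IBD started
	fmt.Println(s.tryStart()) // false: already running
	s.finish()
	fmt.Println(s.running()) // false
}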
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
|
||||
@@ -41,11 +41,13 @@ type FlowContext struct {
|
||||
transactionsToRebroadcastLock sync.Mutex
|
||||
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
|
||||
lastRebroadcastTime time.Time
|
||||
sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
|
||||
sharedRequestedTransactions *relaytransactions.SharedRequestedTransactions
|
||||
|
||||
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
|
||||
|
||||
isInIBD uint32
|
||||
isInIBD uint32
|
||||
startIBDMutex sync.Mutex
|
||||
ibdPeer *peerpkg.Peer
|
||||
|
||||
peers map[id.ID]*peerpkg.Peer
|
||||
peersMutex sync.RWMutex
|
||||
@@ -64,7 +66,7 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
|
||||
domain: domain,
|
||||
addressManager: addressManager,
|
||||
connectionManager: connectionManager,
|
||||
sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
|
||||
sharedRequestedTransactions: relaytransactions.NewSharedRequestedTransactions(),
|
||||
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
|
||||
peers: make(map[id.ID]*peerpkg.Peer),
|
||||
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
|
||||
|
||||
130 app/protocol/flowcontext/ibd.go Normal file
@@ -0,0 +1,130 @@
|
||||
package flowcontext
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
)
|
||||
|
||||
// StartIBDIfRequired selects a peer and starts IBD against it
|
||||
// if required
|
||||
func (f *FlowContext) StartIBDIfRequired() error {
|
||||
f.startIBDMutex.Lock()
|
||||
defer f.startIBDMutex.Unlock()
|
||||
|
||||
if f.IsInIBD() {
|
||||
return nil
|
||||
}
|
||||
|
||||
syncInfo, err := f.domain.Consensus().GetSyncInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if syncInfo.State == externalapi.SyncStateRelay {
|
||||
return nil
|
||||
}
|
||||
|
||||
peer, err := f.selectPeerForIBD(syncInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if peer == nil {
|
||||
spawn("StartIBDIfRequired-requestSelectedTipsIfRequired", f.requestSelectedTipsIfRequired)
|
||||
return nil
|
||||
}
|
||||
|
||||
atomic.StoreUint32(&f.isInIBD, 1)
|
||||
f.ibdPeer = peer
|
||||
spawn("StartIBDIfRequired-peer.StartIBD", peer.StartIBD)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsInIBD is true if IBD is currently running
|
||||
func (f *FlowContext) IsInIBD() bool {
|
||||
return atomic.LoadUint32(&f.isInIBD) != 0
|
||||
}
|
||||
|
||||
// selectPeerForIBD returns the first peer whose selected tip
|
||||
// hash is not in our DAG
|
||||
func (f *FlowContext) selectPeerForIBD(syncInfo *externalapi.SyncInfo) (*peerpkg.Peer, error) {
|
||||
f.peersMutex.RLock()
|
||||
defer f.peersMutex.RUnlock()
|
||||
|
||||
for _, peer := range f.peers {
|
||||
peerSelectedTipHash := peer.SelectedTipHash()
|
||||
|
||||
if f.IsOrphan(peerSelectedTipHash) {
|
||||
continue
|
||||
}
|
||||
|
||||
blockInfo, err := f.domain.Consensus().GetBlockInfo(peerSelectedTipHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if syncInfo.State == externalapi.SyncStateHeadersFirst {
|
||||
if !blockInfo.Exists {
|
||||
return peer, nil
|
||||
}
|
||||
} else {
|
||||
if blockInfo.Exists && blockInfo.BlockStatus == externalapi.StatusHeaderOnly &&
|
||||
blockInfo.IsBlockInHeaderPruningPointFuture {
|
||||
return peer, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *FlowContext) requestSelectedTipsIfRequired() {
|
||||
dagTimeCurrent, err := f.shouldRequestSelectedTips()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if dagTimeCurrent {
|
||||
return
|
||||
}
|
||||
f.requestSelectedTips()
|
||||
}
|
||||
|
||||
func (f *FlowContext) shouldRequestSelectedTips() (bool, error) {
|
||||
const minDurationToRequestSelectedTips = time.Minute
|
||||
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
virtualSelectedParentTime := mstime.UnixMilliseconds(virtualSelectedParent.Header.TimeInMilliseconds)
|
||||
return mstime.Now().Sub(virtualSelectedParentTime) > minDurationToRequestSelectedTips, nil
|
||||
}
|
||||
|
||||
func (f *FlowContext) requestSelectedTips() {
|
||||
f.peersMutex.RLock()
|
||||
defer f.peersMutex.RUnlock()
|
||||
|
||||
for _, peer := range f.peers {
|
||||
peer.RequestSelectedTipIfRequired()
|
||||
}
|
||||
}
|
||||
|
||||
// FinishIBD finishes the current IBD flow and starts a new one if required.
|
||||
func (f *FlowContext) FinishIBD() error {
|
||||
f.ibdPeer = nil
|
||||
|
||||
atomic.StoreUint32(&f.isInIBD, 0)
|
||||
|
||||
return f.StartIBDIfRequired()
|
||||
}
|
||||
|
||||
// IBDPeer returns the currently active IBD peer.
|
||||
// Returns nil if we aren't currently in IBD
|
||||
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
|
||||
if !f.IsInIBD() {
|
||||
return nil
|
||||
}
|
||||
return f.ibdPeer
|
||||
}
|
||||
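The shouldRequestSelectedTips check above reduces to a freshness test on the virtual selected parent's timestamp. A standalone restatement of that condition, using the standard time package instead of mstime for illustration (assumes "time" is imported):

// selectedParentIsStale is true when more than a minute has passed since the
// virtual selected parent's timestamp, the same condition shouldRequestSelectedTips returns.
func selectedParentIsStale(virtualSelectedParentTime, now time.Time) bool {
	const minDurationToRequestSelectedTips = time.Minute
	return now.Sub(virtualSelectedParentTime) > minDurationToRequestSelectedTips
}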
@@ -3,7 +3,7 @@ package flowcontext
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -12,7 +12,7 @@ func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
|
||||
f.orphansMutex.Lock()
|
||||
defer f.orphansMutex.Unlock()
|
||||
|
||||
orphanHash := consensushashing.BlockHash(orphanBlock)
|
||||
orphanHash := consensusserialization.BlockHash(orphanBlock)
|
||||
f.orphans[*orphanHash] = orphanBlock
|
||||
|
||||
log.Infof("Received a block with missing parents, adding to orphan pool: %s", orphanHash)
|
||||
@@ -34,7 +34,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
|
||||
|
||||
// Find all the children of rootBlock among the orphans
|
||||
// and add them to the process queue
|
||||
rootBlockHash := consensushashing.BlockHash(rootBlock)
|
||||
rootBlockHash := consensusserialization.BlockHash(rootBlock)
|
||||
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
|
||||
|
||||
var unorphanedBlocks []*externalapi.DomainBlock
|
||||
@@ -52,7 +52,7 @@ func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*ext
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !orphanBlockParentInfo.Exists || orphanBlockParentInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
if !orphanBlockParentInfo.Exists {
|
||||
log.Tracef("Cannot unorphan block %s. It's missing at "+
|
||||
"least the following parent: %s", orphanHash, orphanBlockParentHash)
|
||||
|
||||
@@ -116,7 +116,7 @@ func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) error {
|
||||
}
|
||||
delete(f.orphans, orphanHash)
|
||||
|
||||
_, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
|
||||
err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
|
||||
if err != nil {
|
||||
if errors.As(err, &ruleerrors.RuleError{}) {
|
||||
log.Infof("Validation failed for orphan block %s: %s", orphanHash, err)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
"time"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
)

// AddTransaction adds transaction to the mempool and propagates it.
@@ -19,7 +19,7 @@ func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
return err
}

transactionID := consensushashing.TransactionID(tx)
transactionID := consensusserialization.TransactionID(tx)
f.transactionsToRebroadcast[*transactionID] = tx
inv := appmessage.NewMsgInvTransaction([]*externalapi.DomainTransactionID{transactionID})
return f.Broadcast(inv)
@@ -32,7 +32,7 @@ func (f *FlowContext) updateTransactionsToRebroadcast(block *externalapi.DomainB
// anymore, although they are not included in the UTXO set.
// This is probably ok, since red blocks are quite rare.
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
delete(f.transactionsToRebroadcast, *consensusserialization.TransactionID(tx))
}
}

@@ -48,15 +48,15 @@ func (f *FlowContext) txIDsToRebroadcast() []*externalapi.DomainTransactionID {
txIDs := make([]*externalapi.DomainTransactionID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = consensushashing.TransactionID(tx)
txIDs[i] = consensusserialization.TransactionID(tx)
i++
}
return txIDs
}

// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
// SharedRequestedTransactions returns a *relaytransactions.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
func (f *FlowContext) SharedRequestedTransactions() *relaytransactions.SharedRequestedTransactions {
return f.sharedRequestedTransactions
}

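Aside (illustrative, not part of the diff): f.transactionsToRebroadcast above is a map from transaction ID to transaction that is filled in AddTransaction, pruned when a block includes one of its transactions, and snapshotted by txIDsToRebroadcast. A hedged sketch of that bookkeeping, with made-up type names standing in for the kaspad ones:

package rebroadcastsketch

// txID and tx stand in for DomainTransactionID / DomainTransaction.
type txID [32]byte

type tx struct {
	id txID
}

type rebroadcastSet struct {
	pending map[txID]*tx
}

// add registers a freshly accepted transaction for periodic re-advertisement.
func (s *rebroadcastSet) add(t *tx) {
	s.pending[t.id] = t
}

// removeIncluded drops every transaction that a newly added block already carries.
func (s *rebroadcastSet) removeIncluded(blockTxs []*tx) {
	for _, t := range blockTxs {
		delete(s.pending, t.id)
	}
}

// ids snapshots the IDs to advertise in the next inv message.
func (s *rebroadcastSet) ids() []txID {
	ids := make([]txID, 0, len(s.pending))
	for id := range s.pending {
		ids = append(ids, id)
	}
	return ids
}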
@@ -1,29 +0,0 @@
package blockrelay

import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func (flow *handleRelayInvsFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32) error {

msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(lowHash, highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}

func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
}
@@ -5,7 +5,6 @@ import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/pkg/errors"
)
@@ -32,7 +31,7 @@ func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *
if err != nil {
return err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
if !blockInfo.Exists {
return protocolerrors.Errorf(true, "block %s not found", hash)
}
block, err := context.Domain().Consensus().GetBlock(hash)

@@ -8,31 +8,25 @@ import (
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/domain/consensus/utils/blocks"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
mathUtil "github.com/kaspanet/kaspad/util/math"
"github.com/pkg/errors"
)

// orphanResolutionRange is the maximum amount of blockLocator hashes
// to search for known blocks. See isBlockInOrphanResolutionRange for
// further details
var orphanResolutionRange uint32 = 5

// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow.
type RelayInvsContext interface {
Domain() domain.Domain
Config() *config.Config
NetAdapter() *netadapter.NetAdapter
OnNewBlock(block *externalapi.DomainBlock) error
SharedRequestedBlocks() *SharedRequestedBlocks
StartIBDIfRequired() error
IsInIBD() bool
Broadcast(message appmessage.Message) error
AddOrphan(orphanBlock *externalapi.DomainBlock)
IsOrphan(blockHash *externalapi.DomainHash) bool
IsIBDRunning() bool
TrySetIBDRunning() bool
UnsetIBDRunning()
}

type handleRelayInvsFlow struct {
@@ -59,7 +53,6 @@ func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outg

func (flow *handleRelayInvsFlow) start() error {
for {
log.Debugf("Waiting for inv")
inv, err := flow.readInv()
if err != nil {
return err
@@ -76,54 +69,30 @@ func (flow *handleRelayInvsFlow) start() error {
return protocolerrors.Errorf(true, "sent inv of an invalid block %s",
inv.Hash)
}
log.Debugf("Block %s already exists. continuing...", inv.Hash)
continue
}

if flow.IsOrphan(inv.Hash) {
log.Debugf("Block %s is a known orphan. continuing...", inv.Hash)
continue
}

// Block relay is disabled during IBD
if flow.IsIBDRunning() {
log.Debugf("Got block %s while in IBD. continuing...", inv.Hash)
continue
}

log.Debugf("Requesting block %s", inv.Hash)
block, exists, err := flow.requestBlock(inv.Hash)
err = flow.StartIBDIfRequired()
if err != nil {
return err
}
if exists {
log.Debugf("Aborting requesting block %s because it already exists", inv.Hash)
if flow.IsInIBD() {
// Block relay is disabled during IBD
continue
}

log.Debugf("Processing block %s", inv.Hash)
missingParents, err := flow.processBlock(block)
if err != nil {
return err
}
if len(missingParents) > 0 {
log.Debugf("Block %s contains orphans: %s", inv.Hash, missingParents)
err := flow.processOrphan(block, missingParents)
requestQueue := newHashesQueueSet()
requestQueue.enqueueIfNotExists(inv.Hash)

for requestQueue.len() > 0 {
err := flow.requestBlocks(requestQueue)
if err != nil {
return err
}
continue
}

log.Debugf("Relaying block %s", inv.Hash)
err = flow.relayBlock(block)
if err != nil {
return err
}
log.Infof("Accepted block %s via relay", inv.Hash)
err = flow.OnNewBlock(block)
if err != nil {
return err
}
}
}
@@ -148,34 +117,68 @@ func (flow *handleRelayInvsFlow) readInv() (*appmessage.MsgInvRelayBlock, error)
|
||||
return inv, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) {
|
||||
exists := flow.SharedRequestedBlocks().addIfNotExists(requestHash)
|
||||
if exists {
|
||||
return nil, true, nil
|
||||
func (flow *handleRelayInvsFlow) requestBlocks(requestQueue *hashesQueueSet) error {
|
||||
numHashesToRequest := mathUtil.MinInt(appmessage.MaxRequestRelayBlocksHashes, requestQueue.len())
|
||||
hashesToRequest := requestQueue.dequeue(numHashesToRequest)
|
||||
|
||||
pendingBlocks := map[externalapi.DomainHash]struct{}{}
|
||||
var filteredHashesToRequest []*externalapi.DomainHash
|
||||
for _, hash := range hashesToRequest {
|
||||
exists := flow.SharedRequestedBlocks().addIfNotExists(hash)
|
||||
if exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// The block can become known from another peer in the process of orphan resolution
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists {
|
||||
continue
|
||||
}
|
||||
|
||||
pendingBlocks[*hash] = struct{}{}
|
||||
filteredHashesToRequest = append(filteredHashesToRequest, hash)
|
||||
}
|
||||
|
||||
// In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is
|
||||
// clean from any pending blocks.
|
||||
defer flow.SharedRequestedBlocks().remove(requestHash)
|
||||
// Exit early if we've filtered out all the hashes
|
||||
if len(filteredHashesToRequest) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash})
|
||||
// In case the function returns earlier than expected, we want to make sure requestedBlocks is
|
||||
// clean from any pending blocks.
|
||||
defer flow.SharedRequestedBlocks().removeSet(pendingBlocks)
|
||||
|
||||
getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks(filteredHashesToRequest)
|
||||
err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return err
|
||||
}
|
||||
|
||||
msgBlock, err := flow.readMsgBlock()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
for len(pendingBlocks) > 0 {
|
||||
msgBlock, err := flow.readMsgBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block := appmessage.MsgBlockToDomainBlock(msgBlock)
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
if *blockHash != *requestHash {
|
||||
return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
|
||||
}
|
||||
block := appmessage.MsgBlockToDomainBlock(msgBlock)
|
||||
blockHash := consensusserialization.BlockHash(block)
|
||||
|
||||
return block, false, nil
|
||||
if _, ok := pendingBlocks[*blockHash]; !ok {
|
||||
return protocolerrors.Errorf(true, "got unrequested block %s", blockHash)
|
||||
}
|
||||
|
||||
err = flow.processAndRelayBlock(requestQueue, block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(pendingBlocks, *blockHash)
|
||||
flow.SharedRequestedBlocks().remove(blockHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive.
|
||||
@@ -199,91 +202,68 @@ func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock,
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
_, err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
func (flow *handleRelayInvsFlow) processAndRelayBlock(requestQueue *hashesQueueSet, block *externalapi.DomainBlock) error {
|
||||
blockHash := consensusserialization.BlockHash(block)
|
||||
err := flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return nil, errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
return errors.Wrapf(err, "failed to process block %s", blockHash)
|
||||
}
|
||||
|
||||
missingParentsError := &ruleerrors.ErrMissingParents{}
|
||||
if errors.As(err, missingParentsError) {
|
||||
return missingParentsError.MissingParentHashes, nil
|
||||
blueScore, err := blocks.ExtractBlueScore(block)
|
||||
if err != nil {
|
||||
return protocolerrors.Errorf(true, "received an orphan "+
|
||||
"block %s with malformed blue score", blockHash)
|
||||
}
|
||||
|
||||
const maxOrphanBlueScoreDiff = 10000
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
selectedTipBlueScore, err := blocks.ExtractBlueScore(virtualSelectedParent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if blueScore > selectedTipBlueScore+maxOrphanBlueScoreDiff {
|
||||
log.Infof("Orphan block %s has blue score %d and the selected tip blue score is "+
|
||||
"%d. Ignoring orphans with a blue score difference from the selected tip greater than %d",
|
||||
blockHash, blueScore, selectedTipBlueScore, maxOrphanBlueScoreDiff)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the orphan to the orphan pool
|
||||
flow.AddOrphan(block)
|
||||
|
||||
// Request the parents for the orphan block from the peer that sent it.
|
||||
for _, missingAncestor := range missingParentsError.MissingParentHashes {
|
||||
requestQueue.enqueueIfNotExists(missingAncestor)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||
return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
log.Infof("Rejected block %s from %s: %s", blockHash, flow.peer, err)
|
||||
|
||||
func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) error {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
|
||||
// Return if the block has been orphaned from elsewhere already
|
||||
if flow.IsOrphan(blockHash) {
|
||||
log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash)
|
||||
return nil
|
||||
return protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash)
|
||||
}
|
||||
|
||||
// Add the block to the orphan set if it's within orphan resolution range
|
||||
isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash)
|
||||
err = flow.Broadcast(appmessage.NewMsgInvBlock(blockHash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isBlockInOrphanResolutionRange {
|
||||
log.Debugf("Block %s is within orphan resolution range. "+
|
||||
"Adding it to the orphan set and requesting its missing parents", blockHash)
|
||||
flow.addToOrphanSetAndRequestMissingParents(block, missingParents)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start IBD unless we already are in IBD
|
||||
log.Debugf("Block %s is out of orphan resolution range. "+
|
||||
"Attempting to start IBD against it.", blockHash)
|
||||
return flow.runIBDIfNotRunning(blockHash)
|
||||
}
|
||||
log.Infof("Accepted block %s via relay", blockHash)
|
||||
|
||||
// isBlockInOrphanResolutionRange finds out whether the given blockHash should be
|
||||
// retrieved via the unorphaning mechanism or via IBD. This method sends a
|
||||
// getBlockLocator request to the peer with a limit of orphanResolutionRange.
|
||||
// In the response, if we know none of the hashes, we should retrieve the given
|
||||
// blockHash via IBD. Otherwise, via unorphaning.
|
||||
func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) {
|
||||
lowHash := flow.Config().ActiveNetParams.GenesisHash
|
||||
err := flow.sendGetBlockLocator(lowHash, blockHash, orphanResolutionRange)
|
||||
err = flow.StartIBDIfRequired()
|
||||
if err != nil {
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
|
||||
blockLocatorHashes, err := flow.receiveBlockLocator()
|
||||
err = flow.OnNewBlock(block)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
for _, blockLocatorHash := range blockLocatorHashes {
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) addToOrphanSetAndRequestMissingParents(
|
||||
block *externalapi.DomainBlock, missingParents []*externalapi.DomainHash) {
|
||||
|
||||
flow.AddOrphan(block)
|
||||
invMessages := make([]*appmessage.MsgInvRelayBlock, len(missingParents))
|
||||
for i, missingParent := range missingParents {
|
||||
invMessages[i] = appmessage.NewMsgInvBlock(missingParent)
|
||||
}
|
||||
flow.invsQueue = append(invMessages, flow.invsQueue...)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,313 +0,0 @@
|
||||
package blockrelay
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (flow *handleRelayInvsFlow) runIBDIfNotRunning(highHash *externalapi.DomainHash) error {
|
||||
wasIBDNotRunning := flow.TrySetIBDRunning()
|
||||
if !wasIBDNotRunning {
|
||||
log.Debugf("IBD is already running")
|
||||
return nil
|
||||
}
|
||||
defer flow.UnsetIBDRunning()
|
||||
|
||||
log.Debugf("IBD started with peer %s and highHash %s", flow.peer, highHash)
|
||||
|
||||
// Fetch all the headers if we don't already have them
|
||||
log.Debugf("Downloading headers up to %s", highHash)
|
||||
err := flow.syncHeaders(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Finished downloading headers up to %s", highHash)
|
||||
|
||||
// Fetch the UTXO set if we don't already have it
|
||||
log.Debugf("Downloading the IBD root UTXO set under highHash %s", highHash)
|
||||
syncInfo, err := flow.Domain().Consensus().GetSyncInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if syncInfo.IsAwaitingUTXOSet {
|
||||
found, err := flow.fetchMissingUTXOSet(syncInfo.IBDRootUTXOBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !found {
|
||||
log.Infof("Cannot download the IBD root UTXO set under highHash %s", highHash)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
log.Debugf("Finished downloading the IBD root UTXO set under highHash %s", highHash)
|
||||
|
||||
// Fetch the block bodies
|
||||
log.Debugf("Downloading block bodies up to %s", highHash)
|
||||
err = flow.syncMissingBlockBodies(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Finished downloading block bodies up to %s", highHash)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) syncHeaders(highHash *externalapi.DomainHash) error {
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with high hash %s", flow.peer, highHash)
|
||||
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
return flow.downloadHeaders(highestSharedBlockHash, highHash)
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) findHighestSharedBlockHash(highHash *externalapi.DomainHash) (
|
||||
lowHash *externalapi.DomainHash, err error) {
|
||||
|
||||
lowHash = flow.Config().ActiveNetParams.GenesisHash
|
||||
currentHighHash := highHash
|
||||
|
||||
for {
|
||||
err := flow.sendGetBlockLocator(lowHash, currentHighHash, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blockLocatorHashes, err := flow.receiveBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We check whether the locator's highest hash is in the local DAG.
|
||||
// If it is, return it. If it isn't, we need to narrow our
|
||||
// getBlockLocator request and try again.
|
||||
locatorHighHash := blockLocatorHashes[0]
|
||||
locatorHighHashInfo, err := flow.Domain().Consensus().GetBlockInfo(locatorHighHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if locatorHighHashInfo.Exists {
|
||||
return locatorHighHash, nil
|
||||
}
|
||||
|
||||
lowHash, currentHighHash, err = flow.Domain().Consensus().FindNextBlockLocatorBoundaries(blockLocatorHashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
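Aside (illustrative, not part of the diff): findHighestSharedBlockHash above narrows a [lowHash, highHash] window by repeatedly requesting a block locator and checking whether its highest hash is already known locally. A self-contained sketch of that narrowing loop, with the peer and consensus dependencies abstracted behind assumed interfaces rather than the real kaspad types:

package locatorsketch

// hash is a stand-in for externalapi.DomainHash.
type hash [32]byte

type peer interface {
	// requestBlockLocator asks the remote peer for a block locator between
	// lowHash and highHash, highest hash first.
	requestBlockLocator(lowHash, highHash hash) ([]hash, error)
}

type localDAG interface {
	hasBlock(h hash) (bool, error)
	// nextBoundaries narrows the window around the unknown part of the
	// previous locator (mirrors FindNextBlockLocatorBoundaries).
	nextBoundaries(locator []hash) (lowHash, highHash hash, err error)
}

// findHighestSharedBlockHash keeps shrinking the window until the locator's
// highest hash is a block we already know, then returns it.
func findHighestSharedBlockHash(p peer, dag localDAG, genesis, peerTip hash) (hash, error) {
	lowHash, highHash := genesis, peerTip
	for {
		locator, err := p.requestBlockLocator(lowHash, highHash)
		if err != nil {
			return hash{}, err
		}
		known, err := dag.hasBlock(locator[0])
		if err != nil {
			return hash{}, err
		}
		if known {
			return locator[0], nil
		}
		lowHash, highHash, err = dag.nextBoundaries(locator)
		if err != nil {
			return hash{}, err
		}
	}
}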
|
||||
|
||||
func (flow *handleRelayInvsFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash) error {
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blocksReceived := 0
|
||||
for {
|
||||
msgBlockHeader, doneIBD, err := flow.receiveHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if doneIBD {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = flow.processHeader(msgBlockHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blocksReceived++
|
||||
if blocksReceived%ibdBatchSize == 0 {
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) receiveHeader() (msgIBDBlock *appmessage.MsgBlockHeader, doneIBD bool, err error) {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgBlockHeader:
|
||||
return message, false, nil
|
||||
case *appmessage.MsgDoneHeaders:
|
||||
return nil, true, nil
|
||||
default:
|
||||
return nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s", appmessage.CmdHeader, appmessage.CmdDoneHeaders, message.Command())
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHeader) error {
|
||||
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
||||
block := &externalapi.DomainBlock{
|
||||
Header: header,
|
||||
Transactions: nil,
|
||||
}
|
||||
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists {
|
||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
||||
return nil
|
||||
}
|
||||
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
||||
}
|
||||
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
||||
|
||||
return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", blockHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) fetchMissingUTXOSet(ibdRootHash *externalapi.DomainHash) (succeed bool, err error) {
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDRootUTXOSetAndBlock(ibdRootHash))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
utxoSet, block, found, err := flow.receiveIBDRootUTXOSetAndBlock()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
err = flow.Domain().Consensus().ValidateAndInsertPruningPoint(block, utxoSet)
|
||||
if err != nil {
|
||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with IBD root UTXO set")
|
||||
}
|
||||
|
||||
syncInfo, err := flow.Domain().Consensus().GetSyncInfo()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// TODO: Find a better way to deal with finality conflicts.
|
||||
if syncInfo.IsAwaitingUTXOSet {
|
||||
log.Warnf("Still awaiting for UTXO set. This can happen only because the given pruning point violates " +
|
||||
"finality. If this keeps happening delete the data directory and restart your node.")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) receiveIBDRootUTXOSetAndBlock() ([]byte, *externalapi.DomainBlock, bool, error) {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDRootUTXOSetAndBlock:
|
||||
return message.UTXOSet,
|
||||
appmessage.MsgBlockToDomainBlock(message.Block), true, nil
|
||||
case *appmessage.MsgIBDRootNotFound:
|
||||
return nil, nil, false, nil
|
||||
default:
|
||||
return nil, nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s",
|
||||
appmessage.CmdIBDRootUTXOSetAndBlock, appmessage.CmdIBDRootNotFound, message.Command(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRelayInvsFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error {
|
||||
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for offset := 0; offset < len(hashes); offset += ibdBatchSize {
|
||||
var hashesToRequest []*externalapi.DomainHash
|
||||
if offset+ibdBatchSize < len(hashes) {
|
||||
hashesToRequest = hashes[offset : offset+ibdBatchSize]
|
||||
} else {
|
||||
hashesToRequest = hashes[offset:]
|
||||
}
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, expectedHash := range hashesToRequest {
|
||||
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
|
||||
if !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
||||
}
|
||||
|
||||
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
if *expectedHash != *blockHash {
|
||||
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
|
||||
}
|
||||
|
||||
_, err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
|
||||
}
|
||||
err = flow.OnNewBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
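Aside (illustrative, not part of the diff): syncMissingBlockBodies above requests missing bodies in fixed-size batches of ibdBatchSize hashes per request message. A generic sketch of that batching loop; the callback and batch size are assumptions, not the kaspad API:

package batchsketch

// requestInBatches walks a hash list in fixed-size batches, mirroring how the
// loop above slices `hashes` before enqueueing each request message.
func requestInBatches(hashes []string, batchSize int, request func(batch []string) error) error {
	for offset := 0; offset < len(hashes); offset += batchSize {
		end := offset + batchSize
		if end > len(hashes) {
			end = len(hashes)
		}
		if err := request(hashes[offset:end]); err != nil {
			return err
		}
	}
	return nil
}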
|
||||
|
||||
// dequeueIncomingMessageAndSkipInvs is a convenience method to be used during
|
||||
// IBD. Inv messages are expected to arrive at any given moment, but should be
|
||||
// ignored while we're in IBD
|
||||
func (flow *handleRelayInvsFlow) dequeueIncomingMessageAndSkipInvs(timeout time.Duration) (appmessage.Message, error) {
|
||||
for {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := message.(*appmessage.MsgInvRelayBlock); !ok {
|
||||
return message, nil
|
||||
}
|
||||
}
|
||||
}
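Aside (illustrative, not part of the diff): dequeueIncomingMessageAndSkipInvs above reads from the incoming route in a loop and silently drops block inv messages, which can arrive at any moment but are ignored during IBD. A minimal sketch of that filtering dequeue, using stand-in types rather than the real appmessage/router ones:

package dequeuesketch

import "time"

// message and route stand in for appmessage.Message and the router route.
type message interface{ command() string }

type invRelayBlock struct{}

func (invRelayBlock) command() string { return "InvRelayBlock" }

type route interface {
	dequeueWithTimeout(timeout time.Duration) (message, error)
}

// dequeueSkippingInvs keeps reading from the route but drops block invs,
// returning the first non-inv message or an error (e.g. a timeout).
func dequeueSkippingInvs(r route, timeout time.Duration) (message, error) {
	for {
		msg, err := r.dequeueWithTimeout(timeout)
		if err != nil {
			return nil, err
		}
		if _, isInv := msg.(invRelayBlock); !isInv {
			return msg, nil
		}
	}
}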
|
||||
@@ -24,6 +24,7 @@ type HandleHandshakeContext interface {
|
||||
NetAdapter() *netadapter.NetAdapter
|
||||
Domain() domain.Domain
|
||||
AddressManager() *addressmanager.AddressManager
|
||||
StartIBDIfRequired() error
|
||||
AddToPeers(peer *peerpkg.Peer) error
|
||||
HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
|
||||
}
|
||||
@@ -91,6 +92,12 @@ func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.N
|
||||
if peerAddress != nil {
|
||||
context.AddressManager().AddAddresses(peerAddress)
|
||||
}
|
||||
|
||||
err = context.StartIBDIfRequired()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return peer, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
@@ -42,16 +41,11 @@ func ReceiveVersion(context HandleHandshakeContext, incomingRoute *router.Route,
|
||||
}
|
||||
|
||||
func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "receiveVersionFlow.start()")
|
||||
defer onEnd()
|
||||
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugf("Got version message")
|
||||
|
||||
msgVersion, ok := message.(*appmessage.MsgVersion)
|
||||
if !ok {
|
||||
return nil, protocolerrors.New(true, "a version message must precede all others")
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/kaspanet/kaspad/version"
|
||||
)
|
||||
@@ -48,14 +48,17 @@ func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route,
|
||||
}
|
||||
|
||||
func (flow *sendVersionFlow) start() error {
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "sendVersionFlow.start")
|
||||
defer onEnd()
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
selectedTipHash := consensusserialization.BlockHash(virtualSelectedParent)
|
||||
subnetworkID := flow.Config().SubnetworkID
|
||||
|
||||
// Version message.
|
||||
localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress())
|
||||
subnetworkID := flow.Config().SubnetworkID
|
||||
msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(),
|
||||
flow.Config().ActiveNetParams.Name, subnetworkID)
|
||||
flow.Config().ActiveNetParams.Name, selectedTipHash, subnetworkID)
|
||||
msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...)
|
||||
|
||||
// Advertise the services flag
|
||||
@@ -67,17 +70,15 @@ func (flow *sendVersionFlow) start() error {
|
||||
// Advertise if inv messages for transactions are desired.
|
||||
msg.DisableRelayTx = flow.Config().BlocksOnly
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(msg)
|
||||
err = flow.outgoingRoute.Enqueue(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for verack
|
||||
log.Debugf("Waiting for verack")
|
||||
_, err = flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("Got verack")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
package blockrelay
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -31,7 +30,7 @@ func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
if !blockInfo.Exists {
|
||||
return protocolerrors.Errorf(true, "block %s not found", hash)
|
||||
}
|
||||
block, err := context.Domain().Consensus().GetBlock(hash)
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockrelay
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -32,16 +32,13 @@ func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute
|
||||
|
||||
func (flow *handleRequestBlockLocatorFlow) start() error {
|
||||
for {
|
||||
lowHash, highHash, limit, err := flow.receiveGetBlockLocator()
|
||||
lowHash, highHash, err := flow.receiveGetBlockLocator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash, limit)
|
||||
locator, err := flow.Domain().Consensus().CreateBlockLocator(lowHash, highHash)
|
||||
if err != nil || len(locator) == 0 {
|
||||
if err != nil {
|
||||
log.Debugf("Received error from CreateBlockLocator: %s", err)
|
||||
}
|
||||
return protocolerrors.Errorf(true, "couldn't build a block "+
|
||||
"locator between blocks %s and %s", lowHash, highHash)
|
||||
}
|
||||
@@ -54,15 +51,15 @@ func (flow *handleRequestBlockLocatorFlow) start() error {
|
||||
}
|
||||
|
||||
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (lowHash *externalapi.DomainHash,
|
||||
highHash *externalapi.DomainHash, limit uint32, err error) {
|
||||
highHash *externalapi.DomainHash, err error) {
|
||||
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
return nil, nil, err
|
||||
}
|
||||
msgGetBlockLocator := message.(*appmessage.MsgRequestBlockLocator)
|
||||
|
||||
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil
|
||||
return msgGetBlockLocator.LowHash, msgGetBlockLocator.HighHash, nil
|
||||
}
|
||||
|
||||
func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error {
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockrelay
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockrelay
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
@@ -1,4 +1,4 @@
|
||||
package blockrelay
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
355
app/protocol/flows/ibd/ibd.go
Normal file
@@ -0,0 +1,355 @@
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/infrastructure/config"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HandleIBDContext is the interface for the context needed for the HandleIBD flow.
|
||||
type HandleIBDContext interface {
|
||||
Domain() domain.Domain
|
||||
Config() *config.Config
|
||||
OnNewBlock(block *externalapi.DomainBlock) error
|
||||
FinishIBD() error
|
||||
}
|
||||
|
||||
type handleIBDFlow struct {
|
||||
HandleIBDContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// HandleIBD waits for IBD start and handles it when IBD is triggered for this peer
|
||||
func HandleIBD(context HandleIBDContext, incomingRoute *router.Route, outgoingRoute *router.Route,
|
||||
peer *peerpkg.Peer) error {
|
||||
|
||||
flow := &handleIBDFlow{
|
||||
HandleIBDContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) start() error {
|
||||
for {
|
||||
err := flow.runIBD()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) runIBD() error {
|
||||
flow.peer.WaitForIBDStart()
|
||||
err := flow.ibdLoop()
|
||||
if err != nil {
|
||||
finishIBDErr := flow.FinishIBD()
|
||||
if finishIBDErr != nil {
|
||||
return finishIBDErr
|
||||
}
|
||||
return err
|
||||
}
|
||||
return flow.FinishIBD()
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) ibdLoop() error {
|
||||
for {
|
||||
syncInfo, err := flow.Domain().Consensus().GetSyncInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch syncInfo.State {
|
||||
case externalapi.SyncStateHeadersFirst:
|
||||
err := flow.syncHeaders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case externalapi.SyncStateMissingUTXOSet:
|
||||
found, err := flow.fetchMissingUTXOSet(syncInfo.IBDRootUTXOBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
case externalapi.SyncStateMissingBlockBodies:
|
||||
err := flow.syncMissingBlockBodies()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case externalapi.SyncStateRelay:
|
||||
return nil
|
||||
default:
|
||||
return errors.Errorf("unexpected state %s", syncInfo.State)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncHeaders() error {
|
||||
peerSelectedTipHash := flow.peer.SelectedTipHash()
|
||||
log.Debugf("Trying to find highest shared chain block with peer %s with selected tip %s", flow.peer, peerSelectedTipHash)
|
||||
highestSharedBlockHash, err := flow.findHighestSharedBlockHash(peerSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Found highest shared chain block %s with peer %s", highestSharedBlockHash, flow.peer)
|
||||
|
||||
return flow.downloadHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) syncMissingBlockBodies() error {
|
||||
hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(flow.peer.SelectedTipHash())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for offset := 0; offset < len(hashes); offset += appmessage.MaxRequestIBDBlocksHashes {
|
||||
var hashesToRequest []*externalapi.DomainHash
|
||||
if offset+appmessage.MaxRequestIBDBlocksHashes < len(hashes) {
|
||||
hashesToRequest = hashes[offset : offset+appmessage.MaxRequestIBDBlocksHashes]
|
||||
} else {
|
||||
hashesToRequest = hashes[offset:]
|
||||
}
|
||||
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, expectedHash := range hashesToRequest {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock)
|
||||
if !ok {
|
||||
return protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command())
|
||||
}
|
||||
|
||||
block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock)
|
||||
blockHash := consensusserialization.BlockHash(block)
|
||||
if *expectedHash != *blockHash {
|
||||
return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash)
|
||||
}
|
||||
|
||||
err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash)
|
||||
}
|
||||
err = flow.OnNewBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) fetchMissingUTXOSet(ibdRootHash *externalapi.DomainHash) (bool, error) {
|
||||
err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDRootUTXOSetAndBlock(ibdRootHash))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
utxoSet, block, found, err := flow.receiveIBDRootUTXOSetAndBlock()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
blockHash := consensusserialization.BlockHash(block)
|
||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "got invalid block %s during IBD", blockHash)
|
||||
}
|
||||
|
||||
err = flow.Domain().Consensus().SetPruningPointUTXOSet(utxoSet)
|
||||
if err != nil {
|
||||
return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with IBD root UTXO set")
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveIBDRootUTXOSetAndBlock() ([]byte, *externalapi.DomainBlock, bool, error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgIBDRootUTXOSetAndBlock:
|
||||
return message.UTXOSet,
|
||||
appmessage.MsgBlockToDomainBlock(message.Block), true, nil
|
||||
case *appmessage.MsgIBDRootNotFound:
|
||||
return nil, nil, false, nil
|
||||
default:
|
||||
return nil, nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s",
|
||||
appmessage.CmdIBDRootUTXOSetAndBlock, appmessage.CmdIBDRootNotFound, message.Command(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) findHighestSharedBlockHash(peerSelectedTipHash *externalapi.DomainHash) (lowHash *externalapi.DomainHash,
|
||||
err error) {
|
||||
|
||||
lowHash = flow.Config().ActiveNetParams.GenesisHash
|
||||
highHash := peerSelectedTipHash
|
||||
|
||||
for {
|
||||
err := flow.sendGetBlockLocator(lowHash, highHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blockLocatorHashes, err := flow.receiveBlockLocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We check whether the locator's highest hash is in the local DAG.
|
||||
// If it is, return it. If it isn't, we need to narrow our
|
||||
// getBlockLocator request and try again.
|
||||
locatorHighHash := blockLocatorHashes[0]
|
||||
locatorHighHashInfo, err := flow.Domain().Consensus().GetBlockInfo(locatorHighHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if locatorHighHashInfo.Exists {
|
||||
return locatorHighHash, nil
|
||||
}
|
||||
|
||||
highHash, lowHash, err = flow.Domain().Consensus().FindNextBlockLocatorBoundaries(blockLocatorHashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash, highHash *externalapi.DomainHash) error {
|
||||
|
||||
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, lowHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
|
||||
if !ok {
|
||||
return nil,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
|
||||
}
|
||||
return msgBlockLocator.BlockLocatorHashes, nil
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) downloadHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
err := flow.sendRequestHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blocksReceived := 0
|
||||
for {
|
||||
msgBlockHeader, doneIBD, err := flow.receiveHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if doneIBD {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = flow.processHeader(msgBlockHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blocksReceived++
|
||||
if blocksReceived%ibdBatchSize == 0 {
|
||||
err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) sendRequestHeaders(highestSharedBlockHash *externalapi.DomainHash,
|
||||
peerSelectedTipHash *externalapi.DomainHash) error {
|
||||
|
||||
msgGetBlockInvs := appmessage.NewMsgRequstHeaders(highestSharedBlockHash, peerSelectedTipHash)
|
||||
return flow.outgoingRoute.Enqueue(msgGetBlockInvs)
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) receiveHeader() (msgIBDBlock *appmessage.MsgBlockHeader, doneIBD bool, err error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
switch message := message.(type) {
|
||||
case *appmessage.MsgBlockHeader:
|
||||
return message, false, nil
|
||||
case *appmessage.MsgDoneHeaders:
|
||||
return nil, true, nil
|
||||
default:
|
||||
return nil, false,
|
||||
protocolerrors.Errorf(true, "received unexpected message type. "+
|
||||
"expected: %s or %s, got: %s", appmessage.CmdHeader, appmessage.CmdDoneHeaders, message.Command())
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleIBDFlow) processHeader(msgBlockHeader *appmessage.MsgBlockHeader) error {
|
||||
header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader)
|
||||
block := &externalapi.DomainBlock{
|
||||
Header: header,
|
||||
Transactions: nil,
|
||||
}
|
||||
|
||||
blockHash := consensusserialization.BlockHash(block)
|
||||
blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if blockInfo.Exists {
|
||||
log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash)
|
||||
return nil
|
||||
}
|
||||
err = flow.Domain().Consensus().ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
if !errors.As(err, &ruleerrors.RuleError{}) {
|
||||
return errors.Wrapf(err, "failed to process header %s during IBD", blockHash)
|
||||
}
|
||||
log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err)
|
||||
|
||||
return protocolerrors.Wrapf(true, err, "got invalid block %s during IBD", blockHash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
9
app/protocol/flows/ibd/log.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package ibd
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
var log, _ = logger.Get(logger.SubsystemTags.IBDS)
|
||||
var spawn = panics.GoroutineWrapperFunc(log)
|
||||
@@ -0,0 +1,66 @@
|
||||
package selectedtip
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// HandleRequestSelectedTipContext is the interface for the context needed for the HandleRequestSelectedTip flow.
|
||||
type HandleRequestSelectedTipContext interface {
|
||||
Domain() domain.Domain
|
||||
}
|
||||
|
||||
type handleRequestSelectedTipFlow struct {
|
||||
HandleRequestSelectedTipContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
}
|
||||
|
||||
// HandleRequestSelectedTip handles getSelectedTip messages
|
||||
func HandleRequestSelectedTip(context HandleRequestSelectedTipContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
|
||||
flow := &handleRequestSelectedTipFlow{
|
||||
HandleRequestSelectedTipContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *handleRequestSelectedTipFlow) start() error {
|
||||
for {
|
||||
err := flow.receiveGetSelectedTip()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = flow.sendSelectedTipHash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *handleRequestSelectedTipFlow) receiveGetSelectedTip() error {
|
||||
message, err := flow.incomingRoute.Dequeue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, ok := message.(*appmessage.MsgRequestSelectedTip)
|
||||
if !ok {
|
||||
return errors.Errorf("received unexpected message type. "+
|
||||
"expected: %s, got: %s", appmessage.CmdRequestSelectedTip, message.Command())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (flow *handleRequestSelectedTipFlow) sendSelectedTipHash() error {
|
||||
virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgSelectedTip := appmessage.NewMsgSelectedTip(consensusserialization.BlockHash(virtualSelectedParent))
|
||||
return flow.outgoingRoute.Enqueue(msgSelectedTip)
|
||||
}
|
||||
78
app/protocol/flows/ibd/selectedtip/request_selected_tip.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package selectedtip
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/common"
|
||||
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// RequestSelectedTipContext is the interface for the context needed for the RequestSelectedTip flow.
|
||||
type RequestSelectedTipContext interface {
|
||||
Domain() domain.Domain
|
||||
StartIBDIfRequired() error
|
||||
}
|
||||
|
||||
type requestSelectedTipFlow struct {
|
||||
RequestSelectedTipContext
|
||||
incomingRoute, outgoingRoute *router.Route
|
||||
peer *peerpkg.Peer
|
||||
}
|
||||
|
||||
// RequestSelectedTip waits for selected tip requests and handles them
|
||||
func RequestSelectedTip(context RequestSelectedTipContext, incomingRoute *router.Route,
|
||||
outgoingRoute *router.Route, peer *peerpkg.Peer) error {
|
||||
|
||||
flow := &requestSelectedTipFlow{
|
||||
RequestSelectedTipContext: context,
|
||||
incomingRoute: incomingRoute,
|
||||
outgoingRoute: outgoingRoute,
|
||||
peer: peer,
|
||||
}
|
||||
return flow.start()
|
||||
}
|
||||
|
||||
func (flow *requestSelectedTipFlow) start() error {
|
||||
for {
|
||||
err := flow.runSelectedTipRequest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (flow *requestSelectedTipFlow) runSelectedTipRequest() error {
|
||||
|
||||
flow.peer.WaitForSelectedTipRequests()
|
||||
defer flow.peer.FinishRequestingSelectedTip()
|
||||
|
||||
err := flow.requestSelectedTip()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peerSelectedTipHash, err := flow.receiveSelectedTip()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
flow.peer.SetSelectedTipHash(peerSelectedTipHash)
|
||||
return flow.StartIBDIfRequired()
|
||||
}
|
||||
|
||||
func (flow *requestSelectedTipFlow) requestSelectedTip() error {
|
||||
msgGetSelectedTip := appmessage.NewMsgRequestSelectedTip()
|
||||
return flow.outgoingRoute.Enqueue(msgGetSelectedTip)
|
||||
}
|
||||
|
||||
func (flow *requestSelectedTipFlow) receiveSelectedTip() (selectedTipHash *externalapi.DomainHash, err error) {
|
||||
message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgSelectedTip := message.(*appmessage.MsgSelectedTip)
|
||||
|
||||
return msgSelectedTip.SelectedTipHash, nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package transactionrelay
|
||||
package relaytransactions
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
@@ -167,7 +167,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
|
||||
continue
|
||||
}
|
||||
tx := appmessage.MsgTxToDomainTransaction(msgTx)
|
||||
txID := consensushashing.TransactionID(tx)
|
||||
txID := consensusserialization.TransactionID(tx)
|
||||
if txID != expectedID {
|
||||
return protocolerrors.Errorf(true, "expected transaction %s, but got %s",
|
||||
expectedID, txID)
|
||||
@@ -1,4 +1,4 @@
|
||||
package transactionrelay
|
||||
package relaytransactions
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
@@ -1,4 +1,4 @@
|
||||
package transactionrelay
|
||||
package relaytransactions
|
||||
|
||||
import (
|
||||
"sync"
|
||||
@@ -37,6 +37,12 @@ func (m *Manager) Peers() []*peerpkg.Peer {
|
||||
return m.context.Peers()
|
||||
}
|
||||
|
||||
// IBDPeer returns the currently active IBD peer.
|
||||
// Returns nil if we aren't currently in IBD
|
||||
func (m *Manager) IBDPeer() *peerpkg.Peer {
|
||||
return m.context.IBDPeer()
|
||||
}
|
||||
|
||||
// AddTransaction adds transaction to the mempool and propagates it.
|
||||
func (m *Manager) AddTransaction(tx *externalapi.DomainTransaction) error {
|
||||
return m.context.AddTransaction(tx)
|
||||
|
||||
@@ -2,6 +2,7 @@ package peer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
@@ -21,6 +22,9 @@ const maxProtocolVersion = 1
|
||||
type Peer struct {
|
||||
connection *netadapter.NetConnection
|
||||
|
||||
selectedTipHashMtx sync.RWMutex
|
||||
selectedTipHash *externalapi.DomainHash
|
||||
|
||||
userAgent string
|
||||
services appmessage.ServiceFlag
|
||||
advertisedProtocolVerion uint32 // protocol version advertised by remote
|
||||
@@ -35,13 +39,21 @@ type Peer struct {
|
||||
lastPingNonce uint64 // The nonce of the last ping we sent
|
||||
lastPingTime time.Time // Time we sent last ping
|
||||
lastPingDuration time.Duration // Time for last ping to return
|
||||
|
||||
isSelectedTipRequested uint32
|
||||
selectedTipRequestChan chan struct{}
|
||||
lastSelectedTipRequest mstime.Time
|
||||
|
||||
ibdStartChan chan struct{}
|
||||
}
|
||||
|
||||
// New returns a new Peer
|
||||
func New(connection *netadapter.NetConnection) *Peer {
|
||||
return &Peer{
|
||||
connection: connection,
|
||||
connectionStarted: time.Now(),
|
||||
connection: connection,
|
||||
selectedTipRequestChan: make(chan struct{}),
|
||||
ibdStartChan: make(chan struct{}),
|
||||
connectionStarted: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,6 +62,20 @@ func (p *Peer) Connection() *netadapter.NetConnection {
return p.connection
}

// SelectedTipHash returns the selected tip of the peer.
func (p *Peer) SelectedTipHash() *externalapi.DomainHash {
p.selectedTipHashMtx.RLock()
defer p.selectedTipHashMtx.RUnlock()
return p.selectedTipHash
}

// SetSelectedTipHash sets the selected tip of the peer.
func (p *Peer) SetSelectedTipHash(hash *externalapi.DomainHash) {
p.selectedTipHashMtx.Lock()
defer p.selectedTipHashMtx.Unlock()
p.selectedTipHash = hash
}

// SubnetworkID returns the subnetwork the peer is associated with.
// It is nil in full nodes.
func (p *Peer) SubnetworkID() *externalapi.DomainSubnetworkID {
@@ -102,6 +128,7 @@ func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion) {
p.userAgent = msg.UserAgent

p.disableRelayTx = msg.DisableRelayTx
p.selectedTipHash = msg.SelectedTipHash
p.subnetworkID = msg.SubnetworkID

p.timeOffset = mstime.Since(msg.Timestamp)
@@ -129,6 +156,46 @@ func (p *Peer) String() string {
return p.connection.String()
}

// RequestSelectedTipIfRequired notifies the peer that requesting
// a selected tip is required. This triggers the selected tip
// request flow.
func (p *Peer) RequestSelectedTipIfRequired() {
if atomic.SwapUint32(&p.isSelectedTipRequested, 1) != 0 {
return
}

const minGetSelectedTipInterval = time.Minute
if mstime.Since(p.lastSelectedTipRequest) < minGetSelectedTipInterval {
return
}

p.lastSelectedTipRequest = mstime.Now()
p.selectedTipRequestChan <- struct{}{}
}

// WaitForSelectedTipRequests blocks the current thread until
// a selected tip is requested from this peer
func (p *Peer) WaitForSelectedTipRequests() {
<-p.selectedTipRequestChan
}

// FinishRequestingSelectedTip finishes requesting the selected
// tip from this peer
func (p *Peer) FinishRequestingSelectedTip() {
atomic.StoreUint32(&p.isSelectedTipRequested, 0)
}

// StartIBD starts the IBD process for this peer
func (p *Peer) StartIBD() {
p.ibdStartChan <- struct{}{}
}

// WaitForIBDStart blocks the current thread until
// IBD start is requested from this peer
func (p *Peer) WaitForIBDStart() {
<-p.ibdStartChan
}

// Address returns the address associated with this connection
func (p *Peer) Address() string {
return p.connection.Address()

@@ -8,8 +8,10 @@ import (
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/handshake"
"github.com/kaspanet/kaspad/app/protocol/flows/ibd"
"github.com/kaspanet/kaspad/app/protocol/flows/ibd/selectedtip"
"github.com/kaspanet/kaspad/app/protocol/flows/ping"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
@@ -105,6 +107,7 @@ func (m *Manager) registerFlows(router *routerpkg.Router, errChan chan error, is
flows = m.registerAddressFlows(router, isStopping, errChan)
flows = append(flows, m.registerBlockRelayFlows(router, isStopping, errChan)...)
flows = append(flows, m.registerPingFlows(router, isStopping, errChan)...)
flows = append(flows, m.registerIBDFlows(router, isStopping, errChan)...)
flows = append(flows, m.registerTransactionRelayFlow(router, isStopping, errChan)...)
flows = append(flows, m.registerRejectsFlow(router, isStopping, errChan)...)

@@ -133,10 +136,7 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
outgoingRoute := router.OutgoingRoute()

return []*flow{
m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{
appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
appmessage.CmdDoneHeaders, appmessage.CmdIBDRootNotFound, appmessage.CmdIBDRootUTXOSetAndBlock,
appmessage.CmdHeader}, isStopping, errChan,
m.registerFlow("HandleRelayInvs", router, []appmessage.MessageCommand{appmessage.CmdInvRelayBlock, appmessage.CmdBlock}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRelayInvs(m.context, incomingRoute,
outgoingRoute, peer)
@@ -148,34 +148,6 @@ func (m *Manager) registerBlockRelayFlows(router *routerpkg.Router, isStopping *
return blockrelay.HandleRelayBlockRequests(m.context, incomingRoute, outgoingRoute, peer)
},
),

m.registerFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestBlockLocator(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestHeaders(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleRequestIBDRootUTXOSetAndBlock", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDRootUTXOSetAndBlock}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleRequestIBDRootUTXOSetAndBlock(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return blockrelay.HandleIBDBlockRequests(m.context, incomingRoute, outgoingRoute)
},
),
}
}

@@ -197,6 +169,62 @@ func (m *Manager) registerPingFlows(router *routerpkg.Router, isStopping *uint32
}
}

func (m *Manager) registerIBDFlows(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()

return []*flow{
m.registerFlow("HandleIBD", router, []appmessage.MessageCommand{appmessage.CmdBlockLocator, appmessage.CmdIBDBlock,
appmessage.CmdDoneHeaders, appmessage.CmdIBDRootNotFound, appmessage.CmdIBDRootUTXOSetAndBlock, appmessage.CmdHeader},
isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ibd.HandleIBD(m.context, incomingRoute, outgoingRoute, peer)
},
),

m.registerFlow("RequestSelectedTip", router,
[]appmessage.MessageCommand{appmessage.CmdSelectedTip}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return selectedtip.RequestSelectedTip(m.context, incomingRoute, outgoingRoute, peer)
},
),

m.registerFlow("HandleRequestSelectedTip", router,
[]appmessage.MessageCommand{appmessage.CmdRequestSelectedTip}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return selectedtip.HandleRequestSelectedTip(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleRequestBlockLocator", router,
[]appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ibd.HandleRequestBlockLocator(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleRequestHeaders", router,
[]appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ibd.HandleRequestHeaders(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleRequestIBDRootUTXOSetAndBlock", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDRootUTXOSetAndBlock}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ibd.HandleRequestIBDRootUTXOSetAndBlock(m.context, incomingRoute, outgoingRoute)
},
),

m.registerFlow("HandleIBDBlockRequests", router,
[]appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return ibd.HandleIBDBlockRequests(m.context, incomingRoute, outgoingRoute)
},
),
}
}

func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopping *uint32, errChan chan error) []*flow {
outgoingRoute := router.OutgoingRoute()

@@ -204,13 +232,13 @@ func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopp
m.registerFlow("HandleRelayedTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
return relaytransactions.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
},
),
m.registerFlow("HandleRequestTransactions", router,
[]appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan,
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
return transactionrelay.HandleRequestedTransactions(m.context, incomingRoute, outgoingRoute)
return relaytransactions.HandleRequestedTransactions(m.context, incomingRoute, outgoingRoute)
},
),
}

@@ -65,9 +65,7 @@ func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAd
for router, listener := range nm.listeners {
if listener.propagateBlockAddedNotifications {
err := router.OutgoingRoute().Enqueue(notification)
if errors.Is(err, routerpkg.ErrRouteClosed) {
log.Warnf("Couldn't send notification: %s", err)
} else if err != nil {
if err != nil {
return err
}
}

@@ -3,7 +3,7 @@ package rpccontext
import (
"encoding/hex"
"fmt"
"math"
"github.com/kaspanet/kaspad/domain/consensus/utils/blocks"
"math/big"
"strconv"

@@ -14,7 +14,7 @@ import (

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/pointers"
@@ -23,13 +23,14 @@ import (
// BuildBlockVerboseData builds a BlockVerboseData from the given block.
// This method must be called with the DAG lock held for reads
func (ctx *Context) BuildBlockVerboseData(block *externalapi.DomainBlock, includeTransactionVerboseData bool) (*appmessage.BlockVerboseData, error) {
hash := consensushashing.BlockHash(block)
hash := consensusserialization.BlockHash(block)
blockHeader := block.Header

blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(hash)
blueScore, err := blocks.ExtractBlueScore(block)
if err != nil {
return nil, err
}

result := &appmessage.BlockVerboseData{
Hash: hash.String(),
Version: blockHeader.Version,
@@ -42,19 +43,19 @@ func (ctx *Context) BuildBlockVerboseData(block *externalapi.DomainBlock, includ
Time: blockHeader.TimeInMilliseconds,
Bits: strconv.FormatInt(int64(blockHeader.Bits), 16),
Difficulty: ctx.GetDifficultyRatio(blockHeader.Bits, ctx.Config.ActiveNetParams),
BlueScore: blockInfo.BlueScore,
BlueScore: blueScore,
}

txIDs := make([]string, len(block.Transactions))
for i, tx := range block.Transactions {
txIDs[i] = consensushashing.TransactionID(tx).String()
txIDs[i] = consensusserialization.TransactionID(tx).String()
}
result.TxIDs = txIDs

if includeTransactionVerboseData {
transactionVerboseData := make([]*appmessage.TransactionVerboseData, len(block.Transactions))
for i, tx := range block.Transactions {
txID := consensushashing.TransactionID(tx).String()
txID := consensusserialization.TransactionID(tx).String()
data, err := ctx.BuildTransactionVerboseData(tx, txID, blockHeader, hash.String())
if err != nil {
return nil, err
@@ -77,11 +78,12 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
target := util.CompactToBig(bits)

difficulty := new(big.Rat).SetFrac(params.PowMax, target)
diff, _ := difficulty.Float64()

roundingPrecision := float64(100)
diff = math.Round(diff*roundingPrecision) / roundingPrecision

outString := difficulty.FloatString(8)
diff, err := strconv.ParseFloat(outString, 64)
if err != nil {
log.Errorf("Cannot get difficulty: %s", err)
return 0
}
return diff
}

@@ -98,7 +100,7 @@ func (ctx *Context) BuildTransactionVerboseData(tx *externalapi.DomainTransactio

txReply := &appmessage.TransactionVerboseData{
TxID: txID,
Hash: consensushashing.TransactionHash(tx).String(),
Hash: consensusserialization.TransactionHash(tx).String(),
Size: estimatedsize.TransactionEstimatedSerializedSize(tx),
TransactionVerboseInputs: ctx.buildTransactionVerboseInputs(tx),
TransactionVerboseOutputs: ctx.buildTransactionVerboseOutputs(tx, nil),

@@ -12,6 +12,6 @@ func HandleGetBlockCount(context *rpccontext.Context, _ *router.Router, _ appmes
if err != nil {
return nil, err
}
response := appmessage.NewGetBlockCountResponseMessage(syncInfo)
response := appmessage.NewGetBlockCountResponseMessage(syncInfo.BlockCount)
return response, nil
}

@@ -3,38 +3,15 @@ package rpchandlers
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
// HandleGetBlockDAGInfo handles the respectively named RPC command
|
||||
func HandleGetBlockDAGInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
params := context.Config.ActiveNetParams
|
||||
consensus := context.Domain.Consensus()
|
||||
|
||||
response := appmessage.NewGetBlockDAGInfoResponseMessage()
|
||||
response.NetworkName = params.Name
|
||||
|
||||
syncInfo, err := consensus.GetSyncInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.BlockCount = syncInfo.BlockCount
|
||||
response.HeaderCount = syncInfo.HeaderCount
|
||||
|
||||
tipHashes, err := consensus.Tips()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.TipHashes = hashes.ToStrings(tipHashes)
|
||||
|
||||
virtualInfo, err := consensus.GetVirtualInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.VirtualParentHashes = hashes.ToStrings(virtualInfo.ParentHashes)
|
||||
response.Difficulty = context.GetDifficultyRatio(virtualInfo.Bits, context.Config.ActiveNetParams)
|
||||
response.PastMedianTime = virtualInfo.PastMedianTime
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
@@ -9,12 +9,15 @@ import (
|
||||
// HandleGetConnectedPeerInfo handles the respectively named RPC command
|
||||
func HandleGetConnectedPeerInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
|
||||
peers := context.ProtocolManager.Peers()
|
||||
ibdPeer := context.ProtocolManager.IBDPeer()
|
||||
infos := make([]*appmessage.GetConnectedPeerInfoMessage, 0, len(peers))
|
||||
for _, peer := range peers {
|
||||
info := &appmessage.GetConnectedPeerInfoMessage{
|
||||
ID: peer.ID().String(),
|
||||
Address: peer.Address(),
|
||||
LastPingDuration: peer.LastPingDuration().Milliseconds(),
|
||||
SelectedTipHash: peer.SelectedTipHash().String(),
|
||||
IsSyncNode: peer == ibdPeer,
|
||||
IsOutbound: peer.IsOutbound(),
|
||||
TimeOffset: peer.TimeOffset().Milliseconds(),
|
||||
UserAgent: peer.UserAgent(),
|
||||
|
||||
@@ -3,7 +3,7 @@ package rpchandlers
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
@@ -15,7 +15,7 @@ func HandleGetSelectedTipHash(context *rpccontext.Context, _ *router.Router, _ a
|
||||
}
|
||||
|
||||
response := appmessage.NewGetSelectedTipHashResponseMessage(
|
||||
consensushashing.BlockHash(selectedTip).String())
|
||||
consensusserialization.BlockHash(selectedTip).String())
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ package rpchandlers
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
)
|
||||
|
||||
@@ -21,7 +21,7 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
|
||||
return errorMessage, nil
|
||||
}
|
||||
|
||||
log.Infof("Accepted block %s via submitBlock", consensushashing.BlockHash(domainBlock))
|
||||
log.Infof("Accepted block %s via submitBlock", consensusserialization.BlockHash(domainBlock))
|
||||
|
||||
response := appmessage.NewSubmitBlockResponseMessage()
|
||||
return response, nil
|
||||
|
||||
@@ -3,7 +3,7 @@ package rpchandlers
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/domain/miningmanager/mempool"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
|
||||
"github.com/pkg/errors"
|
||||
@@ -14,7 +14,7 @@ func HandleSubmitTransaction(context *rpccontext.Context, _ *router.Router, requ
|
||||
submitTransactionRequest := request.(*appmessage.SubmitTransactionRequestMessage)
|
||||
|
||||
domainTransaction := appmessage.MsgTxToDomainTransaction(submitTransactionRequest.Transaction)
|
||||
transactionID := consensushashing.TransactionID(domainTransaction)
|
||||
transactionID := consensusserialization.TransactionID(domainTransaction)
|
||||
err := context.ProtocolManager.AddTransaction(domainTransaction)
|
||||
if err != nil {
|
||||
if !errors.As(err, &mempool.RuleError{}) {
|
||||
|
||||
@@ -2,13 +2,14 @@ package main
|
||||
|
||||
import (
|
||||
nativeerrors "errors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/pow"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
|
||||
@@ -94,7 +95,7 @@ func mineNextBlock(client *minerClient, miningAddr util.Address, foundBlock chan
|
||||
}
|
||||
|
||||
func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error {
|
||||
blockHash := consensushashing.BlockHash(block)
|
||||
blockHash := consensusserialization.BlockHash(block)
|
||||
log.Infof("Found block %s with parents %s. Submitting to %s", blockHash, block.Header.ParentHashes, client.Address())
|
||||
|
||||
err := client.SubmitBlock(block)
|
||||
@@ -113,8 +114,9 @@ func solveBlock(block *externalapi.DomainBlock, stopChan chan struct{}, foundBlo
|
||||
return
|
||||
default:
|
||||
block.Header.Nonce = i
|
||||
hash := consensusserialization.BlockHash(block)
|
||||
atomic.AddUint64(&hashesTried, 1)
|
||||
if pow.CheckProofOfWorkWithTarget(block.Header, targetDifficulty) {
|
||||
if hashes.ToBig(hash).Cmp(targetDifficulty) <= 0 {
|
||||
foundBlock <- block
|
||||
return
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
type consensus struct {
|
||||
lock *sync.Mutex
|
||||
databaseContext model.DBManager
|
||||
databaseContext model.DBReader
|
||||
|
||||
blockProcessor model.BlockProcessor
|
||||
blockBuilder model.BlockBuilder
|
||||
@@ -25,25 +25,23 @@ type consensus struct {
|
||||
dagTraversalManager model.DAGTraversalManager
|
||||
difficultyManager model.DifficultyManager
|
||||
ghostdagManager model.GHOSTDAGManager
|
||||
headerTipsManager model.HeadersSelectedTipManager
|
||||
headerTipsManager model.HeaderTipsManager
|
||||
mergeDepthManager model.MergeDepthManager
|
||||
pruningManager model.PruningManager
|
||||
reachabilityManager model.ReachabilityManager
|
||||
finalityManager model.FinalityManager
|
||||
|
||||
acceptanceDataStore model.AcceptanceDataStore
|
||||
blockStore model.BlockStore
|
||||
blockHeaderStore model.BlockHeaderStore
|
||||
pruningStore model.PruningStore
|
||||
ghostdagDataStore model.GHOSTDAGDataStore
|
||||
blockRelationStore model.BlockRelationStore
|
||||
blockStatusStore model.BlockStatusStore
|
||||
consensusStateStore model.ConsensusStateStore
|
||||
headersSelectedTipStore model.HeaderSelectedTipStore
|
||||
multisetStore model.MultisetStore
|
||||
reachabilityDataStore model.ReachabilityDataStore
|
||||
utxoDiffStore model.UTXODiffStore
|
||||
finalityStore model.FinalityStore
|
||||
acceptanceDataStore model.AcceptanceDataStore
|
||||
blockStore model.BlockStore
|
||||
blockHeaderStore model.BlockHeaderStore
|
||||
pruningStore model.PruningStore
|
||||
ghostdagDataStore model.GHOSTDAGDataStore
|
||||
blockRelationStore model.BlockRelationStore
|
||||
blockStatusStore model.BlockStatusStore
|
||||
consensusStateStore model.ConsensusStateStore
|
||||
headerTipsStore model.HeaderTipsStore
|
||||
multisetStore model.MultisetStore
|
||||
reachabilityDataStore model.ReachabilityDataStore
|
||||
utxoDiffStore model.UTXODiffStore
|
||||
}
|
||||
|
||||
// BuildBlock builds a block over the current state, with the transactions
|
||||
@@ -59,7 +57,7 @@ func (s *consensus) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData,
|
||||
|
||||
// ValidateAndInsertBlock validates the given block and, if valid, applies it
|
||||
// to the current state
|
||||
func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock) (*externalapi.BlockInsertionResult, error) {
|
||||
func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock) error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
@@ -131,12 +129,11 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
|
||||
return blockInfo, nil
|
||||
}
|
||||
|
||||
ghostdagData, err := s.ghostdagDataStore.Get(s.databaseContext, blockHash)
|
||||
isBlockInHeaderPruningPointFuture, err := s.syncManager.IsBlockInHeaderPruningPointFuture(blockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blockInfo.BlueScore = ghostdagData.BlueScore()
|
||||
blockInfo.IsBlockInHeaderPruningPointFuture = isBlockInHeaderPruningPointFuture
|
||||
|
||||
return blockInfo, nil
|
||||
}
|
||||
@@ -177,11 +174,11 @@ func (s *consensus) GetPruningPointUTXOSet(expectedPruningPointHash *externalapi
|
||||
return serializedUTXOSet, nil
|
||||
}
|
||||
|
||||
func (s *consensus) ValidateAndInsertPruningPoint(newPruningPoint *externalapi.DomainBlock, serializedUTXOSet []byte) error {
|
||||
func (s *consensus) SetPruningPointUTXOSet(serializedUTXOSet []byte) error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
return s.blockProcessor.ValidateAndInsertPruningPoint(newPruningPoint, serializedUTXOSet)
|
||||
return s.consensusStateManager.SetPruningPointUTXOSet(serializedUTXOSet)
|
||||
}
|
||||
|
||||
func (s *consensus) GetVirtualSelectedParent() (*externalapi.DomainBlock, error) {
|
||||
@@ -192,39 +189,14 @@ func (s *consensus) GetVirtualSelectedParent() (*externalapi.DomainBlock, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.blockStore.Block(s.databaseContext, virtualGHOSTDAGData.SelectedParent())
|
||||
return s.blockStore.Block(s.databaseContext, virtualGHOSTDAGData.SelectedParent)
|
||||
}
|
||||
|
||||
func (s *consensus) Tips() ([]*externalapi.DomainHash, error) {
|
||||
return s.consensusStateStore.Tips(s.databaseContext)
|
||||
}
|
||||
|
||||
func (s *consensus) GetVirtualInfo() (*externalapi.VirtualInfo, error) {
|
||||
blockRelations, err := s.blockRelationStore.BlockRelation(s.databaseContext, model.VirtualBlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bits, err := s.difficultyManager.RequiredDifficulty(model.VirtualBlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pastMedianTime, err := s.pastMedianTimeManager.PastMedianTime(model.VirtualBlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &externalapi.VirtualInfo{
|
||||
ParentHashes: blockRelations.Parents,
|
||||
Bits: bits,
|
||||
PastMedianTime: pastMedianTime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *consensus) CreateBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) (externalapi.BlockLocator, error) {
|
||||
func (s *consensus) CreateBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
return s.syncManager.CreateBlockLocator(lowHash, highHash, limit)
|
||||
return s.syncManager.CreateBlockLocator(lowHash, highHash)
|
||||
}
|
||||
|
||||
func (s *consensus) FindNextBlockLocatorBoundaries(blockLocator externalapi.BlockLocator) (lowHash, highHash *externalapi.DomainHash, err error) {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
@@ -19,19 +19,19 @@ func TestConsensus_GetBlockInfo(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting up consensus: %+v", err)
|
||||
}
|
||||
defer teardown(false)
|
||||
defer teardown()
|
||||
|
||||
invalidBlock, _, err := consensus.BuildBlockWithParents([]*externalapi.DomainHash{params.GenesisHash}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
invalidBlock.Header.TimeInMilliseconds = 0
|
||||
_, err = consensus.ValidateAndInsertBlock(invalidBlock)
|
||||
err = consensus.ValidateAndInsertBlock(invalidBlock)
|
||||
if !errors.Is(err, ruleerrors.ErrTimeTooOld) {
|
||||
t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrTimeTooOld, err)
|
||||
}
|
||||
|
||||
info, err := consensus.GetBlockInfo(consensushashing.BlockHash(invalidBlock))
|
||||
info, err := consensus.GetBlockInfo(consensusserialization.BlockHash(invalidBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get block info: %v", err)
|
||||
}
|
||||
@@ -42,6 +42,9 @@ func TestConsensus_GetBlockInfo(t *testing.T) {
|
||||
if info.BlockStatus != externalapi.StatusInvalid {
|
||||
t.Fatalf("Expected block status: %s, instead got: %s", externalapi.StatusInvalid, info.BlockStatus)
|
||||
}
|
||||
if info.IsBlockInHeaderPruningPointFuture != false {
|
||||
t.Fatalf("Expected IsBlockInHeaderPruningPointFuture=false, instead found: %t", info.IsBlockInHeaderPruningPointFuture)
|
||||
}
|
||||
|
||||
emptyCoinbase := externalapi.DomainCoinbaseData{}
|
||||
validBlock, err := consensus.BuildBlock(&emptyCoinbase, nil)
|
||||
@@ -49,12 +52,12 @@ func TestConsensus_GetBlockInfo(t *testing.T) {
|
||||
t.Fatalf("consensus.BuildBlock with an empty coinbase shouldn't fail: %v", err)
|
||||
}
|
||||
|
||||
_, err = consensus.ValidateAndInsertBlock(validBlock)
|
||||
err = consensus.ValidateAndInsertBlock(validBlock)
|
||||
if err != nil {
|
||||
t.Fatalf("consensus.ValidateAndInsertBlock with a block straight from consensus.BuildBlock should not fail: %v", err)
|
||||
}
|
||||
|
||||
info, err = consensus.GetBlockInfo(consensushashing.BlockHash(validBlock))
|
||||
info, err = consensus.GetBlockInfo(consensusserialization.BlockHash(validBlock))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get block info: %v", err)
|
||||
}
|
||||
@@ -62,8 +65,11 @@ func TestConsensus_GetBlockInfo(t *testing.T) {
|
||||
if !info.Exists {
|
||||
t.Fatal("The block is missing")
|
||||
}
|
||||
if info.BlockStatus != externalapi.StatusUTXOValid {
|
||||
t.Fatalf("Expected block status: %s, instead got: %s", externalapi.StatusUTXOValid, info.BlockStatus)
|
||||
if info.BlockStatus != externalapi.StatusValid {
|
||||
t.Fatalf("Expected block status: %s, instead got: %s", externalapi.StatusValid, info.BlockStatus)
|
||||
}
|
||||
if info.IsBlockInHeaderPruningPointFuture != true {
|
||||
t.Fatalf("Expected IsBlockInHeaderPruningPointFuture=true, instead found: %t", info.IsBlockInHeaderPruningPointFuture)
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
@@ -3,29 +3,26 @@ package serialization
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/ghostdagmanager"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// BlockGHOSTDAGDataToDBBlockGHOSTDAGData converts BlockGHOSTDAGData to DbBlockGhostdagData
|
||||
func BlockGHOSTDAGDataToDBBlockGHOSTDAGData(blockGHOSTDAGData model.BlockGHOSTDAGData) *DbBlockGhostdagData {
|
||||
func BlockGHOSTDAGDataToDBBlockGHOSTDAGData(blockGHOSTDAGData *model.BlockGHOSTDAGData) *DbBlockGhostdagData {
|
||||
var selectedParent *DbHash
|
||||
if blockGHOSTDAGData.SelectedParent() != nil {
|
||||
selectedParent = DomainHashToDbHash(blockGHOSTDAGData.SelectedParent())
|
||||
if blockGHOSTDAGData.SelectedParent != nil {
|
||||
selectedParent = DomainHashToDbHash(blockGHOSTDAGData.SelectedParent)
|
||||
}
|
||||
|
||||
return &DbBlockGhostdagData{
|
||||
BlueScore: blockGHOSTDAGData.BlueScore(),
|
||||
BlueWork: blockGHOSTDAGData.BlueWork().Bytes(),
|
||||
BlueScore: blockGHOSTDAGData.BlueScore,
|
||||
SelectedParent: selectedParent,
|
||||
MergeSetBlues: DomainHashesToDbHashes(blockGHOSTDAGData.MergeSetBlues()),
|
||||
MergeSetReds: DomainHashesToDbHashes(blockGHOSTDAGData.MergeSetReds()),
|
||||
BluesAnticoneSizes: bluesAnticoneSizesToDBBluesAnticoneSizes(blockGHOSTDAGData.BluesAnticoneSizes()),
|
||||
MergeSetBlues: DomainHashesToDbHashes(blockGHOSTDAGData.MergeSetBlues),
|
||||
MergeSetReds: DomainHashesToDbHashes(blockGHOSTDAGData.MergeSetReds),
|
||||
BluesAnticoneSizes: bluesAnticoneSizesToDBBluesAnticoneSizes(blockGHOSTDAGData.BluesAnticoneSizes),
|
||||
}
|
||||
}
|
||||
|
||||
// DBBlockGHOSTDAGDataToBlockGHOSTDAGData converts DbBlockGhostdagData to BlockGHOSTDAGData
|
||||
func DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData *DbBlockGhostdagData) (model.BlockGHOSTDAGData, error) {
|
||||
func DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData *DbBlockGhostdagData) (*model.BlockGHOSTDAGData, error) {
|
||||
var selectedParent *externalapi.DomainHash
|
||||
if dbBlockGHOSTDAGData.SelectedParent != nil {
|
||||
var err error
|
||||
@@ -50,12 +47,11 @@ func DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData *DbBlockGhostdag
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ghostdagmanager.NewBlockGHOSTDAGData(
|
||||
dbBlockGHOSTDAGData.BlueScore,
|
||||
new(big.Int).SetBytes(dbBlockGHOSTDAGData.BlueWork),
|
||||
selectedParent,
|
||||
mergetSetBlues,
|
||||
mergetSetReds,
|
||||
bluesAnticoneSizes,
|
||||
), nil
|
||||
return &model.BlockGHOSTDAGData{
|
||||
BlueScore: dbBlockGHOSTDAGData.BlueScore,
|
||||
SelectedParent: selectedParent,
|
||||
MergeSetBlues: mergetSetBlues,
|
||||
MergeSetReds: mergetSetReds,
|
||||
BluesAnticoneSizes: bluesAnticoneSizes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -865,11 +865,10 @@ type DbBlockGhostdagData struct {
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
BlueScore uint64 `protobuf:"varint,1,opt,name=blueScore,proto3" json:"blueScore,omitempty"`
|
||||
BlueWork []byte `protobuf:"bytes,2,opt,name=blueWork,proto3" json:"blueWork,omitempty"`
|
||||
SelectedParent *DbHash `protobuf:"bytes,3,opt,name=selectedParent,proto3" json:"selectedParent,omitempty"`
|
||||
MergeSetBlues []*DbHash `protobuf:"bytes,4,rep,name=mergeSetBlues,proto3" json:"mergeSetBlues,omitempty"`
|
||||
MergeSetReds []*DbHash `protobuf:"bytes,5,rep,name=mergeSetReds,proto3" json:"mergeSetReds,omitempty"`
|
||||
BluesAnticoneSizes []*DbBluesAnticoneSizes `protobuf:"bytes,6,rep,name=bluesAnticoneSizes,proto3" json:"bluesAnticoneSizes,omitempty"`
|
||||
SelectedParent *DbHash `protobuf:"bytes,2,opt,name=selectedParent,proto3" json:"selectedParent,omitempty"`
|
||||
MergeSetBlues []*DbHash `protobuf:"bytes,3,rep,name=mergeSetBlues,proto3" json:"mergeSetBlues,omitempty"`
|
||||
MergeSetReds []*DbHash `protobuf:"bytes,4,rep,name=mergeSetReds,proto3" json:"mergeSetReds,omitempty"`
|
||||
BluesAnticoneSizes []*DbBluesAnticoneSizes `protobuf:"bytes,5,rep,name=bluesAnticoneSizes,proto3" json:"bluesAnticoneSizes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DbBlockGhostdagData) Reset() {
|
||||
@@ -911,13 +910,6 @@ func (x *DbBlockGhostdagData) GetBlueScore() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DbBlockGhostdagData) GetBlueWork() []byte {
|
||||
if x != nil {
|
||||
return x.BlueWork
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *DbBlockGhostdagData) GetSelectedParent() *DbHash {
|
||||
if x != nil {
|
||||
return x.SelectedParent
|
||||
@@ -1856,121 +1848,119 @@ var file_dbobjects_proto_rawDesc = []byte{
|
||||
0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64,
|
||||
0x72, 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0d, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x74,
|
||||
0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xdb, 0x02, 0x0a,
|
||||
0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xbf, 0x02, 0x0a,
|
||||
0x13, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67,
|
||||
0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f,
|
||||
0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x3d,
|
||||
0x0a, 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69,
|
||||
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0e, 0x73,
|
||||
0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a,
|
||||
0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04,
|
||||
0x72, 0x65, 0x12, 0x3d, 0x0a, 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61,
|
||||
0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72,
|
||||
0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73,
|
||||
0x68, 0x52, 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e,
|
||||
0x74, 0x12, 0x3b, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75,
|
||||
0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61,
|
||||
0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52,
|
||||
0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39,
|
||||
0x0a, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x18, 0x04,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0d, 0x6d, 0x65, 0x72,
|
||||
0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x6d, 0x65,
|
||||
0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x6d, 0x65, 0x72,
|
||||
0x67, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x12, 0x53, 0x0a, 0x12, 0x62, 0x6c, 0x75,
|
||||
0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x18,
|
||||
0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74,
|
||||
0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x52, 0x12, 0x62, 0x6c, 0x75, 0x65,
|
||||
0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x22, 0x6d,
|
||||
0x0a, 0x14, 0x44, 0x62, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e,
|
||||
0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x48, 0x61,
|
||||
0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61,
|
||||
0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52,
|
||||
0x08, 0x62, 0x6c, 0x75, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6e, 0x74,
|
||||
0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
|
||||
0x0c, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x28, 0x0a,
|
||||
0x0a, 0x44, 0x62, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x73, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6d,
|
||||
0x75, 0x6c, 0x74, 0x69, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6d,
|
||||
0x75, 0x6c, 0x74, 0x69, 0x73, 0x65, 0x74, 0x22, 0x46, 0x0a, 0x09, 0x44, 0x62, 0x55, 0x74, 0x78,
|
||||
0x6f, 0x53, 0x65, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22,
|
||||
0x87, 0x01, 0x0a, 0x14, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x35, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70,
|
||||
0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x65, 0x72,
|
||||
0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x4f, 0x75, 0x74,
|
||||
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12,
|
||||
0x38, 0x0a, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
|
||||
0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x97, 0x01, 0x0a, 0x0b, 0x44, 0x62,
|
||||
0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f,
|
||||
0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e,
|
||||
0x74, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69,
|
||||
0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69,
|
||||
0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x62,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x04, 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63,
|
||||
0x6f, 0x72, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73,
|
||||
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62,
|
||||
0x61, 0x73, 0x65, 0x22, 0x9c, 0x01, 0x0a, 0x12, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61,
|
||||
0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x74, 0x72,
|
||||
0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x52,
|
||||
0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x4e,
|
||||
0x6f, 0x64, 0x65, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x0a,
|
||||
0x11, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53,
|
||||
0x65, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61,
|
||||
0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52,
|
||||
0x11, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53,
|
||||
0x65, 0x74, 0x22, 0xbd, 0x01, 0x0a, 0x16, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62,
|
||||
0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x31, 0x0a,
|
||||
0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||
0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
|
||||
0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e,
|
||||
0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65,
|
||||
0x74, 0x52, 0x65, 0x64, 0x73, 0x12, 0x53, 0x0a, 0x12, 0x62, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e,
|
||||
0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x2e, 0x44, 0x62, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e,
|
||||
0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x52, 0x12, 0x62, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74,
|
||||
0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x22, 0x6d, 0x0a, 0x14, 0x44, 0x62,
|
||||
0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a,
|
||||
0x65, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x62, 0x6c, 0x75,
|
||||
0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e,
|
||||
0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x61, 0x6e, 0x74,
|
||||
0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x28, 0x0a, 0x0a, 0x44, 0x62, 0x4d,
|
||||
0x75, 0x6c, 0x74, 0x69, 0x73, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x75, 0x6c, 0x74, 0x69,
|
||||
0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6d, 0x75, 0x6c, 0x74, 0x69,
|
||||
0x73, 0x65, 0x74, 0x22, 0x46, 0x0a, 0x09, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74,
|
||||
0x12, 0x39, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||
0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
|
||||
0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14,
|
||||
0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x49, 0x74, 0x65, 0x6d, 0x12, 0x35, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69,
|
||||
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e,
|
||||
0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x75,
|
||||
0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
|
||||
0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44,
|
||||
0x62, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x75, 0x74, 0x78, 0x6f,
|
||||
0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x97, 0x01, 0x0a, 0x0b, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f,
|
||||
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a,
|
||||
0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75,
|
||||
0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
|
||||
0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12,
|
||||
0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20,
|
||||
0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x22,
|
||||
0x9c, 0x01, 0x0a, 0x12, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69,
|
||||
0x74, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x74, 0x72, 0x65, 0x65, 0x4e, 0x6f,
|
||||
0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61,
|
||||
0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68,
|
||||
0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52,
|
||||
0x08, 0x74, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x0a, 0x11, 0x66, 0x75, 0x74,
|
||||
0x75, 0x72, 0x65, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x74, 0x18, 0x02,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x11, 0x66, 0x75, 0x74,
|
||||
0x75, 0x72, 0x65, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x74, 0x22, 0xbd,
|
||||
0x01, 0x0a, 0x16, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
|
||||
0x79, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x63, 0x68, 0x69,
|
||||
0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65,
|
||||
0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61,
|
||||
0x73, 0x68, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x06,
|
||||
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48,
|
||||
0x61, 0x73, 0x68, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69,
|
||||
0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e,
|
||||
0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62,
|
||||
0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x74, 0x65,
|
||||
0x72, 0x76, 0x61, 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x40,
|
||||
0x0a, 0x16, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
|
||||
0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72,
|
||||
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10,
|
||||
0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x65, 0x6e, 0x64,
|
||||
0x22, 0x88, 0x01, 0x0a, 0x0a, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x44, 0x69, 0x66, 0x66, 0x12,
|
||||
0x39, 0x0a, 0x05, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23,
|
||||
0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44,
|
||||
0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49,
|
||||
0x74, 0x65, 0x6d, 0x52, 0x05, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x3f, 0x0a, 0x08, 0x74, 0x6f,
|
||||
0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55,
|
||||
0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65,
|
||||
0x6d, 0x52, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x44,
|
||||
0x62, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58,
|
||||
0x4f, 0x53, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74,
|
||||
0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22,
|
||||
0x39, 0x0a, 0x0c, 0x44, 0x62, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x69, 0x70, 0x73, 0x12,
|
||||
0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
|
||||
0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62,
|
||||
0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22, 0x33, 0x0a, 0x06, 0x44, 0x62,
|
||||
0x54, 0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22,
|
||||
0x5d, 0x0a, 0x14, 0x44, 0x62, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66,
|
||||
0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x12, 0x76, 0x69, 0x72, 0x74, 0x75,
|
||||
0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12, 0x76, 0x69, 0x72, 0x74,
|
||||
0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x24,
|
||||
0x0a, 0x0c, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14,
|
||||
0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
|
||||
0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x25, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x2e, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
|
||||
0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76,
|
||||
0x61, 0x6c, 0x22, 0x40, 0x0a, 0x16, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69,
|
||||
0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05,
|
||||
0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61,
|
||||
0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x03, 0x65, 0x6e, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x0a, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x44,
|
||||
0x69, 0x66, 0x66, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x18, 0x01, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x3f,
|
||||
0x0a, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
|
||||
0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22,
|
||||
0x32, 0x0a, 0x1a, 0x44, 0x62, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e,
|
||||
0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79,
|
||||
0x74, 0x65, 0x73, 0x22, 0x39, 0x0a, 0x0c, 0x44, 0x62, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54,
|
||||
0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22, 0x33,
|
||||
0x0a, 0x06, 0x44, 0x62, 0x54, 0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69,
|
||||
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74,
|
||||
0x69, 0x70, 0x73, 0x22, 0x5d, 0x0a, 0x14, 0x44, 0x62, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c,
|
||||
0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x12, 0x76,
|
||||
0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
|
||||
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
|
||||
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12,
|
||||
0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e,
|
||||
0x74, 0x73, 0x22, 0x24, 0x0a, 0x0c, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75,
|
||||
0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
|
||||
0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
|
||||
0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
|
||||
0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b,
|
||||
0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x73,
|
||||
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70,
|
||||
0x61, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}

var (

@@ -83,11 +83,10 @@ message DbBlockStatus {

message DbBlockGhostdagData {
uint64 blueScore = 1;
bytes blueWork = 2;
DbHash selectedParent = 3;
repeated DbHash mergeSetBlues = 4;
repeated DbHash mergeSetReds = 5;
repeated DbBluesAnticoneSizes bluesAnticoneSizes = 6;
DbHash selectedParent = 2;
repeated DbHash mergeSetBlues = 3;
repeated DbHash mergeSetReds = 4;
repeated DbBluesAnticoneSizes bluesAnticoneSizes = 5;
}

message DbBluesAnticoneSizes {

@@ -2,39 +2,32 @@ package serialization

import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

func utxoCollectionToDBUTXOCollection(utxoCollection model.UTXOCollection) ([]*DbUtxoCollectionItem, error) {
items := make([]*DbUtxoCollectionItem, utxoCollection.Len())
func utxoCollectionToDBUTXOCollection(utxoCollection model.UTXOCollection) []*DbUtxoCollectionItem {
items := make([]*DbUtxoCollectionItem, len(utxoCollection))
i := 0
utxoIterator := utxoCollection.Iterator()
for utxoIterator.Next() {
outpoint, entry, err := utxoIterator.Get()
if err != nil {
return nil, err
}

for outpoint, entry := range utxoCollection {
outpointCopy := outpoint
items[i] = &DbUtxoCollectionItem{
Outpoint: DomainOutpointToDbOutpoint(outpoint),
Outpoint: DomainOutpointToDbOutpoint(&outpointCopy),
UtxoEntry: UTXOEntryToDBUTXOEntry(entry),
}
i++
}

return items, nil
return items
}

func dbUTXOCollectionToUTXOCollection(items []*DbUtxoCollectionItem) (model.UTXOCollection, error) {
utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, len(items))
collection := make(model.UTXOCollection)
for _, item := range items {
outpoint, err := DbOutpointToDomainOutpoint(item.Outpoint)
if err != nil {
return nil, err
}

utxoMap[*outpoint] = DBUTXOEntryToUTXOEntry(item.UtxoEntry)
collection[*outpoint] = DBUTXOEntryToUTXOEntry(item.UtxoEntry)
}
return utxo.NewUTXOCollection(utxoMap), nil
return collection, nil
}

@@ -2,29 +2,18 @@ package serialization

import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

// UTXODiffToDBUTXODiff converts UTXODiff to DbUtxoDiff
func UTXODiffToDBUTXODiff(diff model.UTXODiff) (*DbUtxoDiff, error) {
toAdd, err := utxoCollectionToDBUTXOCollection(diff.ToAdd())
if err != nil {
return nil, err
}

toRemove, err := utxoCollectionToDBUTXOCollection(diff.ToRemove())
if err != nil {
return nil, err
}

func UTXODiffToDBUTXODiff(diff *model.UTXODiff) *DbUtxoDiff {
return &DbUtxoDiff{
ToAdd: toAdd,
ToRemove: toRemove,
}, nil
ToAdd: utxoCollectionToDBUTXOCollection(diff.ToAdd),
ToRemove: utxoCollectionToDBUTXOCollection(diff.ToRemove),
}
}

// DBUTXODiffToUTXODiff converts DbUtxoDiff to UTXODiff
func DBUTXODiffToUTXODiff(diff *DbUtxoDiff) (model.UTXODiff, error) {
func DBUTXODiffToUTXODiff(diff *DbUtxoDiff) (*model.UTXODiff, error) {
toAdd, err := dbUTXOCollectionToUTXOCollection(diff.ToAdd)
if err != nil {
return nil, err
@@ -35,5 +24,8 @@ func DBUTXODiffToUTXODiff(diff *DbUtxoDiff) (model.UTXODiff, error) {
return nil, err
}

return utxo.NewUTXODiffFromCollections(toAdd, toRemove)
return &model.UTXODiff{
ToAdd: toAdd,
ToRemove: toRemove,
}, nil
}

@@ -2,20 +2,24 @@ package serialization

import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

// UTXOEntryToDBUTXOEntry converts UTXOEntry to DbUtxoEntry
func UTXOEntryToDBUTXOEntry(utxoEntry externalapi.UTXOEntry) *DbUtxoEntry {
func UTXOEntryToDBUTXOEntry(utxoEntry *externalapi.UTXOEntry) *DbUtxoEntry {
return &DbUtxoEntry{
Amount: utxoEntry.Amount(),
ScriptPublicKey: utxoEntry.ScriptPublicKey(),
BlockBlueScore: utxoEntry.BlockBlueScore(),
IsCoinbase: utxoEntry.IsCoinbase(),
Amount: utxoEntry.Amount,
ScriptPublicKey: utxoEntry.ScriptPublicKey,
BlockBlueScore: utxoEntry.BlockBlueScore,
IsCoinbase: utxoEntry.IsCoinbase,
}
}

// DBUTXOEntryToUTXOEntry convert DbUtxoEntry ro UTXOEntry
func DBUTXOEntryToUTXOEntry(dbUtxoEntry *DbUtxoEntry) externalapi.UTXOEntry {
return utxo.NewUTXOEntry(dbUtxoEntry.Amount, dbUtxoEntry.ScriptPublicKey, dbUtxoEntry.IsCoinbase, dbUtxoEntry.BlockBlueScore)
func DBUTXOEntryToUTXOEntry(dbUtxoEntry *DbUtxoEntry) *externalapi.UTXOEntry {
return &externalapi.UTXOEntry{
Amount: dbUtxoEntry.Amount,
ScriptPublicKey: dbUtxoEntry.ScriptPublicKey,
BlockBlueScore: dbUtxoEntry.BlockBlueScore,
IsCoinbase: dbUtxoEntry.IsCoinbase,
}
}

@@ -9,7 +9,7 @@ import (
type consensusStateStore struct {
tipsStaging []*externalapi.DomainHash
virtualDiffParentsStaging []*externalapi.DomainHash
virtualUTXODiffStaging model.UTXODiff
virtualUTXODiffStaging *model.UTXODiff
virtualUTXOSetStaging model.UTXOCollection

tipsCache []*externalapi.DomainHash

@@ -19,12 +19,12 @@ func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
return utxoSetBucket.Key(serializedOutpoint), nil
}

func (css *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff model.UTXODiff) error {
func (css *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff *model.UTXODiff) error {
if css.virtualUTXOSetStaging != nil {
return errors.New("cannot stage virtual UTXO diff while virtual UTXO set is staged")
}

css.virtualUTXODiffStaging = virtualUTXODiff
css.virtualUTXODiffStaging = virtualUTXODiff.Clone()
return nil
}

@@ -33,13 +33,8 @@ func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction)
return nil
}

toRemoveIterator := css.virtualUTXODiffStaging.ToRemove().Iterator()
for toRemoveIterator.Next() {
toRemoveOutpoint, _, err := toRemoveIterator.Get()
if err != nil {
return err
}
dbKey, err := utxoKey(toRemoveOutpoint)
for toRemoveOutpoint := range css.virtualUTXODiffStaging.ToRemove {
dbKey, err := utxoKey(&toRemoveOutpoint)
if err != nil {
return err
}
@@ -49,13 +44,8 @@ func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction)
}
}

toAddIterator := css.virtualUTXODiffStaging.ToAdd().Iterator()
for toAddIterator.Next() {
toAddOutpoint, toAddEntry, err := toAddIterator.Get()
if err != nil {
return err
}
dbKey, err := utxoKey(toAddOutpoint)
for toAddOutpoint, toAddEntry := range css.virtualUTXODiffStaging.ToAdd {
dbKey, err := utxoKey(&toAddOutpoint)
if err != nil {
return err
}
@@ -79,13 +69,8 @@ func (css *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) e
return nil
}

iterator := css.virtualUTXOSetStaging.Iterator()
for iterator.Next() {
outpoint, utxoEntry, err := iterator.Get()
if err != nil {
return err
}
dbKey, err := utxoKey(outpoint)
for outpoint, utxoEntry := range css.virtualUTXOSetStaging {
dbKey, err := utxoKey(&outpoint)
if err != nil {
return err
}
@@ -105,7 +90,7 @@ func (css *consensusStateStore) commitVirtualUTXOSet(dbTx model.DBTransaction) e
}

func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (
externalapi.UTXOEntry, error) {
*externalapi.UTXOEntry, error) {

if css.virtualUTXOSetStaging != nil {
return css.utxoByOutpointFromStagedVirtualUTXOSet(outpoint)
@@ -116,13 +101,13 @@ func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, outpoin

func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
outpoint *externalapi.DomainOutpoint) (
externalapi.UTXOEntry, error) {
*externalapi.UTXOEntry, error) {

if css.virtualUTXODiffStaging != nil {
if css.virtualUTXODiffStaging.ToRemove().Contains(outpoint) {
if _, ok := css.virtualUTXODiffStaging.ToRemove[*outpoint]; ok {
return nil, errors.Errorf("outpoint was not found")
}
if utxoEntry, ok := css.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok {
if utxoEntry, ok := css.virtualUTXODiffStaging.ToAdd[*outpoint]; ok {
return utxoEntry, nil
}
}
@@ -141,8 +126,8 @@ func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContex
}

func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) (
externalapi.UTXOEntry, error) {
if utxoEntry, ok := css.virtualUTXOSetStaging.Get(outpoint); ok {
*externalapi.UTXOEntry, error) {
if utxoEntry, ok := css.virtualUTXOSetStaging[*outpoint]; ok {
return utxoEntry, nil
}

@@ -161,10 +146,10 @@ func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbCon
outpoint *externalapi.DomainOutpoint) (bool, error) {

if css.virtualUTXODiffStaging != nil {
if css.virtualUTXODiffStaging.ToRemove().Contains(outpoint) {
if _, ok := css.virtualUTXODiffStaging.ToRemove[*outpoint]; ok {
return false, nil
}
if _, ok := css.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok {
if _, ok := css.virtualUTXODiffStaging.ToAdd[*outpoint]; ok {
return true, nil
}
}
@@ -178,7 +163,8 @@ func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbCon
}

func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXOSet(outpoint *externalapi.DomainOutpoint) bool {
return css.virtualUTXOSetStaging.Contains(outpoint)
_, ok := css.virtualUTXOSetStaging[*outpoint]
return ok
}

func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader) (model.ReadOnlyUTXOSetIterator, error) {
@@ -207,7 +193,7 @@ func (u utxoSetIterator) Next() bool {
return u.cursor.Next()
}

func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry *externalapi.UTXOEntry, err error) {
key, err := u.cursor.Key()
if err != nil {
panic(err)
@@ -236,19 +222,18 @@ func (css *consensusStateStore) StageVirtualUTXOSet(virtualUTXOSetIterator model
return errors.New("cannot stage virtual UTXO set while virtual UTXO diff is staged")
}

utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry)
css.virtualUTXOSetStaging = make(model.UTXOCollection)
for virtualUTXOSetIterator.Next() {
outpoint, entry, err := virtualUTXOSetIterator.Get()
if err != nil {
return err
}

if _, exists := utxoMap[*outpoint]; exists {
if _, exists := css.virtualUTXOSetStaging[*outpoint]; exists {
return errors.Errorf("outpoint %s is found more than once in the given iterator", outpoint)
}
utxoMap[*outpoint] = entry
css.virtualUTXOSetStaging[*outpoint] = entry
}
css.virtualUTXOSetStaging = utxo.NewUTXOCollection(utxoMap)

return nil
}

@@ -10,7 +10,7 @@ func serializeOutpoint(outpoint *externalapi.DomainOutpoint) ([]byte, error) {
return proto.Marshal(serialization.DomainOutpointToDbOutpoint(outpoint))
}

func serializeUTXOEntry(entry externalapi.UTXOEntry) ([]byte, error) {
func serializeUTXOEntry(entry *externalapi.UTXOEntry) ([]byte, error) {
return proto.Marshal(serialization.UTXOEntryToDBUTXOEntry(entry))
}

@@ -24,7 +24,7 @@ func deserializeOutpoint(outpointBytes []byte) (*externalapi.DomainOutpoint, err
return serialization.DbOutpointToDomainOutpoint(dbOutpoint)
}

func deserializeUTXOEntry(entryBytes []byte) (externalapi.UTXOEntry, error) {
func deserializeUTXOEntry(entryBytes []byte) (*externalapi.UTXOEntry, error) {
dbEntry := &serialization.DbUtxoEntry{}
err := proto.Unmarshal(entryBytes, dbEntry)
if err != nil {

@@ -1,78 +0,0 @@
package finalitystore

import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)

var bucket = dbkeys.MakeBucket([]byte("finality-points"))

type finalityStore struct {
staging map[externalapi.DomainHash]*externalapi.DomainHash
toDelete map[externalapi.DomainHash]struct{}
cache *lrucache.LRUCache
}

// New instantiates a new FinalityStore
func New(cacheSize int) model.FinalityStore {
return &finalityStore{
staging: make(map[externalapi.DomainHash]*externalapi.DomainHash),
toDelete: make(map[externalapi.DomainHash]struct{}),
cache: lrucache.New(cacheSize),
}
}

func (fs *finalityStore) StageFinalityPoint(blockHash *externalapi.DomainHash, finalityPointHash *externalapi.DomainHash) {
fs.staging[*blockHash] = finalityPointHash
}

func (fs *finalityStore) FinalityPoint(
dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
if finalityPointHash, ok := fs.staging[*blockHash]; ok {
return finalityPointHash, nil
}

if finalityPointHash, ok := fs.cache.Get(blockHash); ok {
return finalityPointHash.(*externalapi.DomainHash), nil
}

finalityPointHashBytes, err := dbContext.Get(fs.hashAsKey(blockHash))
if err != nil {
return nil, err
}
finalityPointHash, err := hashes.FromBytes(finalityPointHashBytes)
if err != nil {
return nil, err
}

fs.cache.Add(blockHash, finalityPointHash)
return finalityPointHash, nil
}

func (fs *finalityStore) Discard() {
fs.staging = make(map[externalapi.DomainHash]*externalapi.DomainHash)
}

func (fs *finalityStore) Commit(dbTx model.DBTransaction) error {
for hash, finalityPointHash := range fs.staging {
err := dbTx.Put(fs.hashAsKey(&hash), finalityPointHash[:])
if err != nil {
return err
}
fs.cache.Add(&hash, finalityPointHash)
}

fs.Discard()
return nil
}

func (fs *finalityStore) IsStaged() bool {
return len(fs.staging) == 0
}

func (fs *finalityStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash[:])
}
@@ -13,21 +13,21 @@ var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))

// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {
staging map[externalapi.DomainHash]model.BlockGHOSTDAGData
staging map[externalapi.DomainHash]*model.BlockGHOSTDAGData
cache *lrucache.LRUCache
}

// New instantiates a new GHOSTDAGDataStore
func New(cacheSize int) model.GHOSTDAGDataStore {
return &ghostdagDataStore{
staging: make(map[externalapi.DomainHash]model.BlockGHOSTDAGData),
staging: make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData),
cache: lrucache.New(cacheSize),
}
}

// Stage stages the given blockGHOSTDAGData for the given blockHash
func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData model.BlockGHOSTDAGData) {
gds.staging[*blockHash] = blockGHOSTDAGData
func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) {
gds.staging[*blockHash] = blockGHOSTDAGData.Clone()
}

func (gds *ghostdagDataStore) IsStaged() bool {
@@ -35,7 +35,7 @@ func (gds *ghostdagDataStore) IsStaged() bool {
}

func (gds *ghostdagDataStore) Discard() {
gds.staging = make(map[externalapi.DomainHash]model.BlockGHOSTDAGData)
gds.staging = make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData)
}

func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
@@ -56,13 +56,13 @@ func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
}

// Get gets the blockGHOSTDAGData associated with the given blockHash
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.BlockGHOSTDAGData, error) {
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockGHOSTDAGData, error) {
if blockGHOSTDAGData, ok := gds.staging[*blockHash]; ok {
return blockGHOSTDAGData, nil
return blockGHOSTDAGData.Clone(), nil
}

if blockGHOSTDAGData, ok := gds.cache.Get(blockHash); ok {
return blockGHOSTDAGData.(model.BlockGHOSTDAGData), nil
return blockGHOSTDAGData.(*model.BlockGHOSTDAGData).Clone(), nil
}

blockGHOSTDAGDataBytes, err := dbContext.Get(gds.hashAsKey(blockHash))
@@ -75,18 +75,18 @@ func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externala
return nil, err
}
gds.cache.Add(blockHash, blockGHOSTDAGData)
return blockGHOSTDAGData, nil
return blockGHOSTDAGData.Clone(), nil
}

func (gds *ghostdagDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
return bucket.Key(hash[:])
}

func (gds *ghostdagDataStore) serializeBlockGHOSTDAGData(blockGHOSTDAGData model.BlockGHOSTDAGData) ([]byte, error) {
func (gds *ghostdagDataStore) serializeBlockGHOSTDAGData(blockGHOSTDAGData *model.BlockGHOSTDAGData) ([]byte, error) {
return proto.Marshal(serialization.BlockGHOSTDAGDataToDBBlockGHOSTDAGData(blockGHOSTDAGData))
}

func (gds *ghostdagDataStore) deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes []byte) (model.BlockGHOSTDAGData, error) {
func (gds *ghostdagDataStore) deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes []byte) (*model.BlockGHOSTDAGData, error) {
dbBlockGHOSTDAGData := &serialization.DbBlockGhostdagData{}
err := proto.Unmarshal(blockGHOSTDAGDataBytes, dbBlockGHOSTDAGData)
if err != nil {

@@ -1,100 +0,0 @@
|
||||
package headersselectedtipstore
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
|
||||
)
|
||||
|
||||
var headerSelectedTipKey = dbkeys.MakeBucket().Key([]byte("headers-selected-tip"))
|
||||
|
||||
type headerSelectedTipStore struct {
|
||||
staging *externalapi.DomainHash
|
||||
cache *externalapi.DomainHash
|
||||
}
|
||||
|
||||
// New instantiates a new HeaderSelectedTipStore
|
||||
func New() model.HeaderSelectedTipStore {
|
||||
return &headerSelectedTipStore{}
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Has(dbContext model.DBReader) (bool, error) {
|
||||
if hts.staging != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if hts.cache != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return dbContext.Has(headerSelectedTipKey)
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Discard() {
|
||||
hts.staging = nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Commit(dbTx model.DBTransaction) error {
|
||||
if hts.staging == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
selectedTipBytes, err := hts.serializeHeadersSelectedTip(hts.staging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(headerSelectedTipKey, selectedTipBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hts.cache = hts.staging
|
||||
|
||||
hts.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Stage(selectedTip *externalapi.DomainHash) {
|
||||
hts.staging = selectedTip.Clone()
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) IsStaged() bool {
|
||||
return hts.staging != nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) HeadersSelectedTip(dbContext model.DBReader) (*externalapi.DomainHash, error) {
|
||||
if hts.staging != nil {
|
||||
return hts.staging.Clone(), nil
|
||||
}
|
||||
|
||||
if hts.cache != nil {
|
||||
return hts.cache.Clone(), nil
|
||||
}
|
||||
|
||||
selectedTipBytes, err := dbContext.Get(headerSelectedTipKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selectedTip, err := hts.deserializeHeadersSelectedTip(selectedTipBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hts.cache = selectedTip
|
||||
return hts.cache.Clone(), nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) serializeHeadersSelectedTip(selectedTip *externalapi.DomainHash) ([]byte, error) {
|
||||
return proto.Marshal(serialization.DomainHashToDbHash(selectedTip))
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) deserializeHeadersSelectedTip(selectedTipBytes []byte) (*externalapi.DomainHash, error) {
|
||||
dbHash := &serialization.DbHash{}
|
||||
err := proto.Unmarshal(selectedTipBytes, dbHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serialization.DbHashToDomainHash(dbHash)
|
||||
}
|
||||
@@ -0,0 +1,101 @@
|
||||
package headertipsstore
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
|
||||
)
|
||||
|
||||
var headerTipsKey = dbkeys.MakeBucket().Key([]byte("header-tips"))
|
||||
|
||||
type headerTipsStore struct {
|
||||
staging []*externalapi.DomainHash
|
||||
cache []*externalapi.DomainHash
|
||||
}
|
||||
|
||||
// New instantiates a new HeaderTipsStore
|
||||
func New() model.HeaderTipsStore {
|
||||
return &headerTipsStore{}
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) HasTips(dbContext model.DBReader) (bool, error) {
|
||||
if len(hts.staging) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if len(hts.cache) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return dbContext.Has(headerTipsKey)
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) Discard() {
|
||||
hts.staging = nil
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) Commit(dbTx model.DBTransaction) error {
|
||||
if hts.staging == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tipsBytes, err := hts.serializeTips(hts.staging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(headerTipsKey, tipsBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hts.cache = hts.staging
|
||||
|
||||
hts.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) Stage(tips []*externalapi.DomainHash) {
|
||||
hts.staging = externalapi.CloneHashes(tips)
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) IsStaged() bool {
|
||||
return hts.staging != nil
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
|
||||
if hts.staging != nil {
|
||||
return externalapi.CloneHashes(hts.staging), nil
|
||||
}
|
||||
|
||||
if hts.cache != nil {
|
||||
return externalapi.CloneHashes(hts.cache), nil
|
||||
}
|
||||
|
||||
tipsBytes, err := dbContext.Get(headerTipsKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tips, err := hts.deserializeTips(tipsBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hts.cache = tips
|
||||
return externalapi.CloneHashes(tips), nil
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) {
|
||||
dbTips := serialization.HeaderTipsToDBHeaderTips(tips)
|
||||
return proto.Marshal(dbTips)
|
||||
}
|
||||
|
||||
func (hts *headerTipsStore) deserializeTips(tipsBytes []byte) ([]*externalapi.DomainHash, error) {
|
||||
dbTips := &serialization.DbHeaderTips{}
|
||||
err := proto.Unmarshal(tipsBytes, dbTips)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serialization.DBHeaderTipsToHeaderTips(dbTips)
|
||||
}
|
||||
@@ -15,7 +15,7 @@ var utxoDiffChildBucket = dbkeys.MakeBucket([]byte("utxo-diff-children"))
|
||||
|
||||
// utxoDiffStore represents a store of UTXODiffs
|
||||
type utxoDiffStore struct {
|
||||
utxoDiffStaging map[externalapi.DomainHash]model.UTXODiff
|
||||
utxoDiffStaging map[externalapi.DomainHash]*model.UTXODiff
|
||||
utxoDiffChildStaging map[externalapi.DomainHash]*externalapi.DomainHash
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
utxoDiffCache *lrucache.LRUCache
|
||||
@@ -25,7 +25,7 @@ type utxoDiffStore struct {
|
||||
// New instantiates a new UTXODiffStore
|
||||
func New(cacheSize int) model.UTXODiffStore {
|
||||
return &utxoDiffStore{
|
||||
utxoDiffStaging: make(map[externalapi.DomainHash]model.UTXODiff),
|
||||
utxoDiffStaging: make(map[externalapi.DomainHash]*model.UTXODiff),
|
||||
utxoDiffChildStaging: make(map[externalapi.DomainHash]*externalapi.DomainHash),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
utxoDiffCache: lrucache.New(cacheSize),
|
||||
@@ -34,8 +34,8 @@ func New(cacheSize int) model.UTXODiffStore {
|
||||
}
|
||||
|
||||
// Stage stages the given utxoDiff for the given blockHash
|
||||
func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff model.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
|
||||
uds.utxoDiffStaging[*blockHash] = utxoDiff
|
||||
func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff *model.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
|
||||
uds.utxoDiffStaging[*blockHash] = utxoDiff.Clone()
|
||||
|
||||
if utxoDiffChild != nil {
|
||||
uds.utxoDiffChildStaging[*blockHash] = utxoDiffChild.Clone()
|
||||
@@ -55,7 +55,7 @@ func (uds *utxoDiffStore) IsBlockHashStaged(blockHash *externalapi.DomainHash) b
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) Discard() {
|
||||
uds.utxoDiffStaging = make(map[externalapi.DomainHash]model.UTXODiff)
|
||||
uds.utxoDiffStaging = make(map[externalapi.DomainHash]*model.UTXODiff)
|
||||
uds.utxoDiffChildStaging = make(map[externalapi.DomainHash]*externalapi.DomainHash)
|
||||
uds.toDelete = make(map[externalapi.DomainHash]struct{})
|
||||
}
|
||||
@@ -107,13 +107,13 @@ func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
|
||||
}
|
||||
|
||||
// UTXODiff gets the utxoDiff associated with the given blockHash
|
||||
func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.UTXODiff, error) {
|
||||
func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.UTXODiff, error) {
|
||||
if utxoDiff, ok := uds.utxoDiffStaging[*blockHash]; ok {
|
||||
return utxoDiff, nil
|
||||
return utxoDiff.Clone(), nil
|
||||
}
|
||||
|
||||
if utxoDiff, ok := uds.utxoDiffCache.Get(blockHash); ok {
|
||||
return utxoDiff.(model.UTXODiff), nil
|
||||
return utxoDiff.(*model.UTXODiff).Clone(), nil
|
||||
}
|
||||
|
||||
utxoDiffBytes, err := dbContext.Get(uds.utxoDiffHashAsKey(blockHash))
|
||||
@@ -126,7 +126,7 @@ func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *external
|
||||
return nil, err
|
||||
}
|
||||
uds.utxoDiffCache.Add(blockHash, utxoDiff)
|
||||
return utxoDiff, nil
|
||||
return utxoDiff.Clone(), nil
|
||||
}
|
||||
|
||||
// UTXODiffChild gets the utxoDiff child associated with the given blockHash
|
||||
@@ -187,21 +187,16 @@ func (uds *utxoDiffStore) utxoDiffChildHashAsKey(hash *externalapi.DomainHash) m
|
||||
return utxoDiffChildBucket.Key(hash[:])
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff model.UTXODiff) ([]byte, error) {
|
||||
dbUtxoDiff, err := serialization.UTXODiffToDBUTXODiff(utxoDiff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff *model.UTXODiff) ([]byte, error) {
|
||||
dbUtxoDiff := serialization.UTXODiffToDBUTXODiff(utxoDiff)
|
||||
bytes, err := proto.Marshal(dbUtxoDiff)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return bytes, nil
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) deserializeUTXODiff(utxoDiffBytes []byte) (model.UTXODiff, error) {
|
||||
func (uds *utxoDiffStore) deserializeUTXODiff(utxoDiffBytes []byte) (*model.UTXODiff, error) {
|
||||
dbUTXODiff := &serialization.DbUtxoDiff{}
|
||||
err := proto.Unmarshal(utxoDiffBytes, dbUTXODiff)
|
||||
if err != nil {
|
||||
|
||||
@@ -6,7 +6,8 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/dagtraversalmanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/finalitymanager"
|
||||
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
|
||||
|
||||
consensusdatabase "github.com/kaspanet/kaspad/domain/consensus/database"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/acceptancedatastore"
|
||||
@@ -15,13 +16,13 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/blockstatusstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/blockstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/consensusstatestore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/finalitystore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/ghostdagdatastore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/headersselectedtipstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/headertipsstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/multisetstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/pruningstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/reachabilitydatastore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/utxodiffstore"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/blockbuilder"
|
||||
@@ -32,7 +33,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/dagtopologymanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/difficultymanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/ghostdagmanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/headersselectedtipmanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/headertipsmanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/mergedepthmanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/pastmediantimemanager"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/pruningmanager"
|
||||
@@ -41,16 +42,14 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/transactionvalidator"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
|
||||
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
|
||||
)
|
||||
|
||||
// Factory instantiates new Consensuses
|
||||
type Factory interface {
|
||||
NewConsensus(dagParams *dagconfig.Params, db infrastructuredatabase.Database) (externalapi.Consensus, error)
|
||||
NewTestConsensus(dagParams *dagconfig.Params, testName string) (
|
||||
tc testapi.TestConsensus, teardown func(keepDataDir bool), err error)
|
||||
NewTestConsensus(dagParams *dagconfig.Params, testName string) (tc testapi.TestConsensus, teardown func(), err error)
|
||||
NewTestConsensusWithDataDir(dagParams *dagconfig.Params, dataDir string) (
|
||||
tc testapi.TestConsensus, teardown func(keepDataDir bool), err error)
|
||||
tc testapi.TestConsensus, teardown func(), err error)
|
||||
}
|
||||
|
||||
type factory struct{}
|
||||
@@ -65,25 +64,25 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
dbManager := consensusdatabase.New(db)
|
||||
|
||||
// Data Structures
|
||||
acceptanceDataStore := acceptancedatastore.New(200)
|
||||
blockStore, err := blockstore.New(dbManager, 200)
|
||||
storeCacheSize := 200
|
||||
acceptanceDataStore := acceptancedatastore.New(storeCacheSize)
|
||||
blockStore, err := blockstore.New(dbManager, storeCacheSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockHeaderStore, err := blockheaderstore.New(dbManager, 10_000)
|
||||
blockHeaderStore, err := blockheaderstore.New(dbManager, storeCacheSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockRelationStore := blockrelationstore.New(200)
|
||||
blockStatusStore := blockstatusstore.New(200)
|
||||
multisetStore := multisetstore.New(200)
|
||||
blockRelationStore := blockrelationstore.New(storeCacheSize)
|
||||
blockStatusStore := blockstatusstore.New(storeCacheSize)
|
||||
multisetStore := multisetstore.New(storeCacheSize)
|
||||
pruningStore := pruningstore.New()
|
||||
reachabilityDataStore := reachabilitydatastore.New(200)
|
||||
utxoDiffStore := utxodiffstore.New(200)
|
||||
reachabilityDataStore := reachabilitydatastore.New(storeCacheSize)
|
||||
utxoDiffStore := utxodiffstore.New(storeCacheSize)
|
||||
consensusStateStore := consensusstatestore.New()
|
||||
ghostdagDataStore := ghostdagdatastore.New(10_000)
|
||||
headersSelectedTipStore := headersselectedtipstore.New()
|
||||
finalityStore := finalitystore.New(200)
|
||||
ghostdagDataStore := ghostdagdatastore.New(storeCacheSize)
|
||||
headerTipsStore := headertipsstore.New()
|
||||
|
||||
// Processes
|
||||
reachabilityManager := reachabilitymanager.New(
|
||||
@@ -93,14 +92,12 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
dagTopologyManager := dagtopologymanager.New(
|
||||
dbManager,
|
||||
reachabilityManager,
|
||||
blockRelationStore,
|
||||
ghostdagDataStore)
|
||||
blockRelationStore)
|
||||
ghostdagManager := ghostdagmanager.New(
|
||||
dbManager,
|
||||
dagTopologyManager,
|
||||
ghostdagDataStore,
|
||||
blockHeaderStore,
|
||||
dagParams.K)
|
||||
model.KType(dagParams.K))
|
||||
dagTraversalManager := dagtraversalmanager.New(
|
||||
dbManager,
|
||||
dagTopologyManager,
|
||||
@@ -114,10 +111,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
ghostdagDataStore)
|
||||
transactionValidator := transactionvalidator.New(dagParams.BlockCoinbaseMaturity,
|
||||
dagParams.EnableNonNativeSubnetworks,
|
||||
dagParams.MassPerTxByte,
|
||||
dagParams.MassPerScriptPubKeyByte,
|
||||
dagParams.MassPerSigOp,
|
||||
dagParams.MaxCoinbasePayloadLength,
|
||||
dbManager,
|
||||
pastMedianTimeManager,
|
||||
ghostdagDataStore)
|
||||
@@ -130,41 +123,26 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
dagTraversalManager,
|
||||
dagParams.PowMax,
|
||||
dagParams.DifficultyAdjustmentWindowSize,
|
||||
dagParams.DisableDifficultyAdjustment,
|
||||
dagParams.TargetTimePerBlock,
|
||||
dagParams.GenesisHash)
|
||||
dagParams.TargetTimePerBlock)
|
||||
coinbaseManager := coinbasemanager.New(
|
||||
dbManager,
|
||||
dagParams.SubsidyReductionInterval,
|
||||
dagParams.BaseSubsidy,
|
||||
dagParams.CoinbasePayloadScriptPublicKeyMaxLength,
|
||||
ghostdagDataStore,
|
||||
acceptanceDataStore)
|
||||
headerTipsManager := headersselectedtipmanager.New(dbManager, dagTopologyManager, ghostdagManager, headersSelectedTipStore)
|
||||
headerTipsManager := headertipsmanager.New(dbManager, dagTopologyManager, ghostdagManager, headerTipsStore)
|
||||
genesisHash := dagParams.GenesisHash
|
||||
finalityManager := finalitymanager.New(
|
||||
dbManager,
|
||||
dagTopologyManager,
|
||||
finalityStore,
|
||||
ghostdagDataStore,
|
||||
genesisHash,
|
||||
dagParams.FinalityDepth())
|
||||
mergeDepthManager := mergedepthmanager.New(
|
||||
dagParams.FinalityDepth(),
|
||||
dbManager,
|
||||
dagTopologyManager,
|
||||
dagTraversalManager,
|
||||
finalityManager,
|
||||
ghostdagDataStore)
|
||||
blockValidator := blockvalidator.New(
|
||||
dagParams.PowMax,
|
||||
dagParams.SkipProofOfWork,
|
||||
genesisHash,
|
||||
dagParams.EnableNonNativeSubnetworks,
|
||||
dagParams.MaxBlockSize,
|
||||
dagParams.MergeSetSizeLimit,
|
||||
dagParams.MaxBlockParents,
|
||||
dagParams.TimestampDeviationTolerance,
|
||||
dagParams.TargetTimePerBlock,
|
||||
dagParams.DisableDifficultyAdjustment,
|
||||
dagParams.DifficultyAdjustmentWindowSize,
|
||||
|
||||
dbManager,
|
||||
difficultyManager,
|
||||
@@ -175,23 +153,17 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
dagTraversalManager,
|
||||
coinbaseManager,
|
||||
mergeDepthManager,
|
||||
reachabilityManager,
|
||||
|
||||
pruningStore,
|
||||
blockStore,
|
||||
ghostdagDataStore,
|
||||
blockHeaderStore,
|
||||
blockStatusStore,
|
||||
reachabilityDataStore,
|
||||
)
|
||||
consensusStateManager, err := consensusstatemanager.New(
|
||||
dbManager,
|
||||
dagParams.FinalityDepth(),
|
||||
dagParams.PruningDepth(),
|
||||
dagParams.MaxMassAcceptedByBlock,
|
||||
dagParams.MaxBlockParents,
|
||||
dagParams.MergeSetSizeLimit,
|
||||
genesisHash,
|
||||
|
||||
ghostdagManager,
|
||||
dagTopologyManager,
|
||||
dagTraversalManager,
|
||||
@@ -201,7 +173,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
reachabilityManager,
|
||||
coinbaseManager,
|
||||
mergeDepthManager,
|
||||
finalityManager,
|
||||
|
||||
blockStatusStore,
|
||||
ghostdagDataStore,
|
||||
@@ -212,8 +183,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
blockRelationStore,
|
||||
acceptanceDataStore,
|
||||
blockHeaderStore,
|
||||
headersSelectedTipStore,
|
||||
pruningStore)
|
||||
headerTipsStore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -227,7 +197,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
ghostdagDataStore,
|
||||
pruningStore,
|
||||
blockStatusStore,
|
||||
headersSelectedTipStore,
|
||||
multisetStore,
|
||||
acceptanceDataStore,
|
||||
blockStore,
|
||||
@@ -239,16 +208,17 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
syncManager := syncmanager.New(
|
||||
dbManager,
|
||||
genesisHash,
|
||||
dagParams.TargetTimePerBlock.Milliseconds(),
|
||||
dagTraversalManager,
|
||||
dagTopologyManager,
|
||||
ghostdagManager,
|
||||
pruningManager,
|
||||
consensusStateManager,
|
||||
|
||||
ghostdagDataStore,
|
||||
blockStatusStore,
|
||||
blockHeaderStore,
|
||||
blockStore,
|
||||
pruningStore)
|
||||
headerTipsStore,
|
||||
blockStore)
|
||||
|
||||
blockBuilder := blockbuilder.New(
|
||||
dbManager,
|
||||
@@ -264,7 +234,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
)
|
||||
|
||||
blockProcessor := blockprocessor.New(
|
||||
genesisHash,
|
||||
dagParams,
|
||||
dbManager,
|
||||
consensusStateManager,
|
||||
pruningManager,
|
||||
@@ -289,8 +259,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
reachabilityDataStore,
|
||||
utxoDiffStore,
|
||||
blockHeaderStore,
|
||||
headersSelectedTipStore,
|
||||
finalityStore)
|
||||
headerTipsStore)
|
||||
|
||||
c := &consensus{
|
||||
lock: &sync.Mutex{},
|
||||
@@ -312,21 +281,19 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
mergeDepthManager: mergeDepthManager,
|
||||
pruningManager: pruningManager,
|
||||
reachabilityManager: reachabilityManager,
|
||||
finalityManager: finalityManager,
|
||||
|
||||
acceptanceDataStore: acceptanceDataStore,
|
||||
blockStore: blockStore,
|
||||
blockHeaderStore: blockHeaderStore,
|
||||
pruningStore: pruningStore,
|
||||
ghostdagDataStore: ghostdagDataStore,
|
||||
blockStatusStore: blockStatusStore,
|
||||
blockRelationStore: blockRelationStore,
|
||||
consensusStateStore: consensusStateStore,
|
||||
headersSelectedTipStore: headersSelectedTipStore,
|
||||
multisetStore: multisetStore,
|
||||
reachabilityDataStore: reachabilityDataStore,
|
||||
utxoDiffStore: utxoDiffStore,
|
||||
finalityStore: finalityStore,
|
||||
acceptanceDataStore: acceptanceDataStore,
|
||||
blockStore: blockStore,
|
||||
blockHeaderStore: blockHeaderStore,
|
||||
pruningStore: pruningStore,
|
||||
ghostdagDataStore: ghostdagDataStore,
|
||||
blockStatusStore: blockStatusStore,
|
||||
blockRelationStore: blockRelationStore,
|
||||
consensusStateStore: consensusStateStore,
|
||||
headerTipsStore: headerTipsStore,
|
||||
multisetStore: multisetStore,
|
||||
reachabilityDataStore: reachabilityDataStore,
|
||||
utxoDiffStore: utxoDiffStore,
|
||||
}
|
||||
|
||||
genesisInfo, err := c.GetBlockInfo(genesisHash)
|
||||
@@ -335,7 +302,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
}
|
||||
|
||||
if !genesisInfo.Exists {
|
||||
_, err = c.ValidateAndInsertBlock(dagParams.GenesisBlock)
|
||||
err = c.ValidateAndInsertBlock(dagParams.GenesisBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -345,7 +312,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
}
|
||||
|
||||
func (f *factory) NewTestConsensus(dagParams *dagconfig.Params, testName string) (
|
||||
tc testapi.TestConsensus, teardown func(keepDataDir bool), err error) {
|
||||
tc testapi.TestConsensus, teardown func(), err error) {
|
||||
|
||||
dataDir, err := ioutil.TempDir("", testName)
|
||||
if err != nil {
|
||||
@@ -356,7 +323,7 @@ func (f *factory) NewTestConsensus(dagParams *dagconfig.Params, testName string)
|
||||
}
|
||||
|
||||
func (f *factory) NewTestConsensusWithDataDir(dagParams *dagconfig.Params, dataDir string) (
|
||||
tc testapi.TestConsensus, teardown func(keepDataDir bool), err error) {
|
||||
tc testapi.TestConsensus, teardown func(), err error) {
|
||||
|
||||
db, err := ldb.NewLevelDB(dataDir)
|
||||
if err != nil {
|
||||
@@ -371,24 +338,16 @@ func (f *factory) NewTestConsensusWithDataDir(dagParams *dagconfig.Params, dataD
|
||||
|
||||
testConsensusStateManager := consensusstatemanager.NewTestConsensusStateManager(consensusAsImplementation.consensusStateManager)
|
||||
|
||||
testTransactionValidator := transactionvalidator.NewTestTransactionValidator(consensusAsImplementation.transactionValidator)
|
||||
|
||||
tstConsensus := &testConsensus{
|
||||
consensus: consensusAsImplementation,
|
||||
testConsensusStateManager: testConsensusStateManager,
|
||||
testReachabilityManager: reachabilitymanager.NewTestReachabilityManager(consensusAsImplementation.
|
||||
reachabilityManager),
|
||||
testTransactionValidator: testTransactionValidator,
|
||||
}
|
||||
tstConsensus.testBlockBuilder = blockbuilder.NewTestBlockBuilder(consensusAsImplementation.blockBuilder, tstConsensus)
|
||||
teardown = func(keepDataDir bool) {
|
||||
teardown = func() {
|
||||
db.Close()
|
||||
if !keepDataDir {
|
||||
err := os.RemoveAll(dataDir)
|
||||
if err != nil {
|
||||
log.Errorf("Error removing data directory for test consensus: %s", err)
|
||||
}
|
||||
}
|
||||
os.RemoveAll(dataDir)
|
||||
}
|
||||
|
||||
return tstConsensus, teardown, nil
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensusserialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
|
||||
"github.com/kaspanet/kaspad/domain/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
@@ -24,7 +24,7 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting up consensus: %+v", err)
|
||||
}
|
||||
defer teardown(false)
|
||||
defer teardown()
|
||||
|
||||
buildAndInsertBlock := func(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) {
|
||||
block, _, err := consensus.BuildBlockWithParents(parentHashes, nil, nil)
|
||||
@@ -32,7 +32,7 @@ func TestFinality(t *testing.T) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = consensus.ValidateAndInsertBlock(block)
|
||||
err = consensus.ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -47,17 +47,17 @@ func TestFinality(t *testing.T) {
|
||||
for i := uint64(0); i < finalityInterval-1; i++ {
|
||||
mainChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{mainChainTipHash})
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed to process Block #%d: %+v", i, err)
|
||||
t.Fatalf("TestFinality: Failed to process Block #%d: %v", i, err)
|
||||
}
|
||||
mainChainTipHash = consensushashing.BlockHash(mainChainTip)
|
||||
mainChainTipHash = consensusserialization.BlockHash(mainChainTip)
|
||||
|
||||
blockInfo, err := consensus.GetBlockInfo(mainChainTipHash)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Block #%d failed to get info: %+v", i, err)
|
||||
t.Fatalf("TestFinality: Block #%d failed to get info: %v", i, err)
|
||||
}
|
||||
if blockInfo.BlockStatus != externalapi.StatusUTXOValid {
|
||||
if blockInfo.BlockStatus != externalapi.StatusValid {
|
||||
t.Fatalf("Block #%d in main chain expected to have status '%s', but got '%s'",
|
||||
i, externalapi.StatusUTXOValid, blockInfo.BlockStatus)
|
||||
i, externalapi.StatusValid, blockInfo.BlockStatus)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -69,7 +69,7 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err)
|
||||
}
|
||||
sideChainTipHash = consensushashing.BlockHash(sideChainTip)
|
||||
sideChainTipHash = consensusserialization.BlockHash(sideChainTip)
|
||||
|
||||
blockInfo, err := consensus.GetBlockInfo(sideChainTipHash)
|
||||
if err != nil {
|
||||
@@ -89,7 +89,7 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err)
|
||||
}
|
||||
sideChainTipHash = consensushashing.BlockHash(sideChainTip)
|
||||
sideChainTipHash = consensusserialization.BlockHash(sideChainTip)
|
||||
}
|
||||
|
||||
// Make sure that now the sideChainTip is valid and selectedTip
|
||||
@@ -99,15 +99,15 @@ func TestFinality(t *testing.T) {
|
||||
} else if !blockInfo.Exists {
|
||||
t.Fatalf("TestFinality: Failed getting block info, doesn't exists")
|
||||
}
|
||||
if blockInfo.BlockStatus != externalapi.StatusUTXOValid {
|
||||
if blockInfo.BlockStatus != externalapi.StatusValid {
|
||||
t.Fatalf("TestFinality: Overtaking block in side-chain expected to have status '%s', but got '%s'",
|
||||
externalapi.StatusUTXOValid, blockInfo.BlockStatus)
|
||||
externalapi.StatusValid, blockInfo.BlockStatus)
|
||||
}
|
||||
selectedTip, err := consensus.GetVirtualSelectedParent()
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting virtual selectedParent: %v", err)
|
||||
}
|
||||
if *consensushashing.BlockHash(selectedTip) != *sideChainTipHash {
|
||||
if *consensusserialization.BlockHash(selectedTip) != *sideChainTipHash {
|
||||
t.Fatalf("Overtaking block in side-chain is not selectedTip")
|
||||
}
|
||||
|
||||
@@ -117,10 +117,10 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err)
|
||||
}
|
||||
mainChainTipHash = consensushashing.BlockHash(mainChainTip)
|
||||
mainChainTipHash = consensusserialization.BlockHash(mainChainTip)
|
||||
}
|
||||
|
||||
virtualFinality, err := consensus.FinalityManager().VirtualFinalityPoint()
|
||||
virtualFinality, err := consensus.ConsensusStateManager().VirtualFinalityPoint()
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting the virtual's finality point: %v", err)
|
||||
}
|
||||
@@ -137,7 +137,7 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err)
|
||||
}
|
||||
sideChainTipHash = consensushashing.BlockHash(sideChainTip)
|
||||
sideChainTipHash = consensusserialization.BlockHash(sideChainTip)
|
||||
}
|
||||
|
||||
// Check that sideChainTip hash higher blue score than the selected parent
|
||||
@@ -145,7 +145,7 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting virtual selectedParent: %v", err)
|
||||
}
|
||||
selectedTipGhostDagData, err := consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), consensushashing.BlockHash(selectedTip))
|
||||
selectedTipGhostDagData, err := consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), consensusserialization.BlockHash(selectedTip))
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting the ghost dag data of the selected tip: %v", err)
|
||||
}
|
||||
@@ -155,7 +155,7 @@ func TestFinality(t *testing.T) {
|
||||
t.Fatalf("TestFinality: Failed getting the ghost dag data of the sidechain tip: %v", err)
|
||||
}
|
||||
|
||||
if selectedTipGhostDagData.BlueWork().Cmp(sideChainTipGhostDagData.BlueWork()) == 1 {
|
||||
if selectedTipGhostDagData.BlueScore > sideChainTipGhostDagData.BlueScore {
|
||||
t.Fatalf("sideChainTip is not the bluest tip when it is expected to be")
|
||||
}
|
||||
|
||||
@@ -190,7 +190,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
return nil, false // fo some reason go doesn't recognize that t.Fatalf never returns
|
||||
}
|
||||
|
||||
_, err = consensus.ValidateAndInsertBlock(block)
|
||||
err = consensus.ValidateAndInsertBlock(block)
|
||||
if err == nil {
|
||||
return block, false
|
||||
} else if errors.Is(err, ruleerrors.ErrViolatingBoundedMergeDepth) {
|
||||
@@ -202,7 +202,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
}
|
||||
|
||||
processBlock := func(consensus testapi.TestConsensus, block *externalapi.DomainBlock, name string) {
|
||||
_, err := consensus.ValidateAndInsertBlock(block)
|
||||
err := consensus.ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: %s got unexpected error from ProcessBlock: %+v", name, err)
|
||||
|
||||
@@ -214,7 +214,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: Failed building block: %v", err)
|
||||
}
|
||||
_, err = consensus.ValidateAndInsertBlock(block)
|
||||
err = consensus.ValidateAndInsertBlock(block)
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: Failed Inserting block to consensus: %v", err)
|
||||
}
|
||||
@@ -222,7 +222,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
}
|
||||
|
||||
getStatus := func(consensus testapi.TestConsensus, block *externalapi.DomainBlock) externalapi.BlockStatus {
|
||||
blockInfo, err := consensus.GetBlockInfo(consensushashing.BlockHash(block))
|
||||
blockInfo, err := consensus.GetBlockInfo(consensusserialization.BlockHash(block))
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: Failed to get block info: %v", err)
|
||||
} else if !blockInfo.Exists {
|
||||
@@ -241,32 +241,32 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err)
|
||||
}
|
||||
defer teardownFunc2(false)
|
||||
defer teardownFunc2()
|
||||
|
||||
// Create a block on top on genesis
|
||||
block1 := buildAndInsertBlock(consensusBuild, []*externalapi.DomainHash{params.GenesisHash})
|
||||
|
||||
// Create a chain
|
||||
selectedChain := make([]*externalapi.DomainBlock, 0, finalityInterval+1)
|
||||
parent := consensushashing.BlockHash(block1)
|
||||
parent := consensusserialization.BlockHash(block1)
|
||||
// Make sure this is always bigger than `blocksChain2` so it will stay the selected chain
|
||||
for i := 0; i < finalityInterval+2; i++ {
|
||||
block := buildAndInsertBlock(consensusBuild, []*externalapi.DomainHash{parent})
|
||||
selectedChain = append(selectedChain, block)
|
||||
parent = consensushashing.BlockHash(block)
|
||||
parent = consensusserialization.BlockHash(block)
|
||||
}
|
||||
|
||||
// Create another chain
|
||||
blocksChain2 := make([]*externalapi.DomainBlock, 0, finalityInterval+1)
|
||||
parent = consensushashing.BlockHash(block1)
|
||||
parent = consensusserialization.BlockHash(block1)
|
||||
for i := 0; i < finalityInterval+1; i++ {
|
||||
block := buildAndInsertBlock(consensusBuild, []*externalapi.DomainHash{parent})
|
||||
blocksChain2 = append(blocksChain2, block)
|
||||
parent = consensushashing.BlockHash(block)
|
||||
parent = consensusserialization.BlockHash(block)
|
||||
}
|
||||
|
||||
// Teardown and assign nil to make sure we use the right DAG from here on.
|
||||
teardownFunc1(false)
|
||||
teardownFunc1()
|
||||
consensusBuild = nil
|
||||
|
||||
// Now test against the real DAG
|
||||
@@ -284,20 +284,20 @@ func TestBoundedMergeDepth(t *testing.T) {
}

// submit a block pointing at tip(chain1) and on first block in chain2 directly
mergeDepthViolatingBlockBottom, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(blocksChain2[0]), consensushashing.BlockHash(selectedChain[len(selectedChain)-1])})
mergeDepthViolatingBlockBottom, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensusserialization.BlockHash(blocksChain2[0]), consensusserialization.BlockHash(selectedChain[len(selectedChain)-1])})
if !isViolatingMergeDepth {
t.Fatalf("TestBoundedMergeDepth: Expected mergeDepthViolatingBlockBottom to violate merge depth")
}

// submit a block pointing at tip(chain1) and tip(chain2) should also obviously violate merge depth (this points at first block in chain2 indirectly)
mergeDepthViolatingTop, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(blocksChain2[len(blocksChain2)-1]), consensushashing.BlockHash(selectedChain[len(selectedChain)-1])})
mergeDepthViolatingTop, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensusserialization.BlockHash(blocksChain2[len(blocksChain2)-1]), consensusserialization.BlockHash(selectedChain[len(selectedChain)-1])})
if !isViolatingMergeDepth {
t.Fatalf("TestBoundedMergeDepth: Expected mergeDepthViolatingTop to violate merge depth")
}

// the location of the parents in the slices need to be both `-X` so the `selectedChain` one will have higher blueScore (it's a chain longer by 1)
kosherizingBlock, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(blocksChain2[len(blocksChain2)-3]), consensushashing.BlockHash(selectedChain[len(selectedChain)-3])})
kosherizingBlockHash := consensushashing.BlockHash(kosherizingBlock)
kosherizingBlock, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensusserialization.BlockHash(blocksChain2[len(blocksChain2)-3]), consensusserialization.BlockHash(selectedChain[len(selectedChain)-3])})
kosherizingBlockHash := consensusserialization.BlockHash(kosherizingBlock)
if isViolatingMergeDepth {
t.Fatalf("TestBoundedMergeDepth: Expected blueKosherizingBlock to not violate merge depth")
}
@@ -308,7 +308,7 @@ func TestBoundedMergeDepth(t *testing.T) {
}
// Make sure it's actually blue
found := false
for _, blue := range virtualGhotDagData.MergeSetBlues() {
for _, blue := range virtualGhotDagData.MergeSetBlues {
if *blue == *kosherizingBlockHash {
found = true
break
@@ -318,7 +318,7 @@ func TestBoundedMergeDepth(t *testing.T) {
t.Fatalf("TestBoundedMergeDepth: Expected kosherizingBlock to be blue by the virtual")
}

pointAtBlueKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{kosherizingBlockHash, consensushashing.BlockHash(selectedChain[len(selectedChain)-1])})
pointAtBlueKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{kosherizingBlockHash, consensusserialization.BlockHash(selectedChain[len(selectedChain)-1])})
if isViolatingMergeDepth {
t.Fatalf("TestBoundedMergeDepth: Expected selectedTip to not violate merge depth")
@@ -329,16 +329,16 @@ func TestBoundedMergeDepth(t *testing.T) {
t.Fatalf("TestBoundedMergeDepth: Failed getting the virtual selected parent %v", err)
}

if *consensushashing.BlockHash(virtualSelectedParent) != *consensushashing.BlockHash(pointAtBlueKosherizing) {
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", consensushashing.BlockHash(pointAtBlueKosherizing), consensushashing.BlockHash(virtualSelectedParent))
if *consensusserialization.BlockHash(virtualSelectedParent) != *consensusserialization.BlockHash(pointAtBlueKosherizing) {
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", consensusserialization.BlockHash(pointAtBlueKosherizing), consensusserialization.BlockHash(virtualSelectedParent))
}

// Now let's make the kosherizing block red and try to merge again
tip := consensushashing.BlockHash(selectedChain[len(selectedChain)-1])
tip := consensusserialization.BlockHash(selectedChain[len(selectedChain)-1])
// we use k-1 because `kosherizingBlock` points at tip-2, so 2+k-1 = k+1 anticone.
for i := 0; i < int(params.K)-1; i++ {
block := buildAndInsertBlock(consensusReal, []*externalapi.DomainHash{tip})
tip = consensushashing.BlockHash(block)
tip = consensusserialization.BlockHash(block)
}

virtualSelectedParent, err = consensusReal.GetVirtualSelectedParent()
@@ -346,8 +346,8 @@ func TestBoundedMergeDepth(t *testing.T) {
t.Fatalf("TestBoundedMergeDepth: Failed getting the virtual selected parent %v", err)
}

if *consensushashing.BlockHash(virtualSelectedParent) != *tip {
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", tip, consensushashing.BlockHash(virtualSelectedParent))
if *consensusserialization.BlockHash(virtualSelectedParent) != *tip {
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", tip, consensusserialization.BlockHash(virtualSelectedParent))
}

virtualGhotDagData, err = consensusReal.GHOSTDAGDataStore().Get(consensusReal.DatabaseContext(), model.VirtualBlockHash)
@@ -356,7 +356,7 @@ func TestBoundedMergeDepth(t *testing.T) {
}
// Make sure it's actually blue
found = false
for _, blue := range virtualGhotDagData.MergeSetBlues() {
for _, blue := range virtualGhotDagData.MergeSetBlues {
if *blue == *kosherizingBlockHash {
found = true
break
@@ -372,7 +372,7 @@ func TestBoundedMergeDepth(t *testing.T) {
}

// Now `pointAtBlueKosherizing` itself is actually still blue, so we can still point at that even though we can't point at kosherizing directly anymore
transitiveBlueKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(pointAtBlueKosherizing), tip})
transitiveBlueKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensusserialization.BlockHash(pointAtBlueKosherizing), tip})
if isViolatingMergeDepth {
t.Fatalf("TestBoundedMergeDepth: Expected transitiveBlueKosherizing to not violate merge depth")
}
@@ -382,19 +382,19 @@ func TestBoundedMergeDepth(t *testing.T) {
t.Fatalf("TestBoundedMergeDepth: Failed getting the virtual selected parent %v", err)
}

if *consensushashing.BlockHash(virtualSelectedParent) != *consensushashing.BlockHash(transitiveBlueKosherizing) {
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", consensushashing.BlockHash(transitiveBlueKosherizing), consensushashing.BlockHash(virtualSelectedParent))
if *consensusserialization.BlockHash(virtualSelectedParent) != *consensusserialization.BlockHash(transitiveBlueKosherizing) {
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", consensusserialization.BlockHash(transitiveBlueKosherizing), consensusserialization.BlockHash(virtualSelectedParent))
}

// Lets validate the status of all the interesting blocks
if getStatus(consensusReal, pointAtBlueKosherizing) != externalapi.StatusUTXOValid {
t.Fatalf("TestBoundedMergeDepth: pointAtBlueKosherizing expected status '%s' but got '%s'", externalapi.StatusUTXOValid, getStatus(consensusReal, pointAtBlueKosherizing))
if getStatus(consensusReal, pointAtBlueKosherizing) != externalapi.StatusValid {
t.Fatalf("TestBoundedMergeDepth: pointAtBlueKosherizing expected status '%s' but got '%s'", externalapi.StatusValid, getStatus(consensusReal, pointAtBlueKosherizing))
}
if getStatus(consensusReal, pointAtRedKosherizing) != externalapi.StatusInvalid {
t.Fatalf("TestBoundedMergeDepth: pointAtRedKosherizing expected status '%s' but got '%s'", externalapi.StatusInvalid, getStatus(consensusReal, pointAtRedKosherizing))
}
if getStatus(consensusReal, transitiveBlueKosherizing) != externalapi.StatusUTXOValid {
t.Fatalf("TestBoundedMergeDepth: transitiveBlueKosherizing expected status '%s' but got '%s'", externalapi.StatusUTXOValid, getStatus(consensusReal, transitiveBlueKosherizing))
if getStatus(consensusReal, transitiveBlueKosherizing) != externalapi.StatusValid {
t.Fatalf("TestBoundedMergeDepth: transitiveBlueKosherizing expected status '%s' but got '%s'", externalapi.StatusValid, getStatus(consensusReal, transitiveBlueKosherizing))
}
if getStatus(consensusReal, mergeDepthViolatingBlockBottom) != externalapi.StatusInvalid {
t.Fatalf("TestBoundedMergeDepth: mergeDepthViolatingBlockBottom expected status '%s' but got '%s'", externalapi.StatusInvalid, getStatus(consensusReal, mergeDepthViolatingBlockBottom))
@@ -412,8 +412,8 @@ func TestBoundedMergeDepth(t *testing.T) {
}
}
for i, b := range selectedChain {
if getStatus(consensusReal, b) != externalapi.StatusUTXOValid {
t.Fatalf("selectedChain[%d] expected status '%s' but got '%s'", i, externalapi.StatusUTXOValid, getStatus(consensusReal, b))
if getStatus(consensusReal, b) != externalapi.StatusValid {
t.Fatalf("selectedChain[%d] expected status '%s' but got '%s'", i, externalapi.StatusValid, getStatus(consensusReal, b))
}
}
})

@@ -1,7 +0,0 @@
package consensus

import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)

var log, _ = logger.Get(logger.SubsystemTags.BDAG)
@@ -4,5 +4,6 @@ package externalapi
type BlockInfo struct {
Exists bool
BlockStatus BlockStatus
BlueScore uint64

IsBlockInHeaderPruningPointFuture bool
}

@@ -12,8 +12,8 @@ const (
// StatusInvalid indicates that the block is invalid.
StatusInvalid BlockStatus = iota

// StatusUTXOValid indicates the block is valid from any UTXO related aspects and has passed all the other validations as well.
StatusUTXOValid
// StatusValid indicates that the block has been fully validated.
StatusValid

// StatusUTXOPendingVerification indicates that the block is pending verification against its past UTXO-Set, either
// because it was not yet verified since the block was never in the selected parent chain, or if the
@@ -29,7 +29,7 @@ const (

var blockStatusStrings = map[BlockStatus]string{
StatusInvalid: "Invalid",
StatusUTXOValid: "Valid",
StatusValid: "Valid",
StatusUTXOPendingVerification: "UTXOPendingVerification",
StatusDisqualifiedFromChain: "DisqualifiedFromChain",
StatusHeaderOnly: "HeaderOnly",

@@ -3,7 +3,7 @@ package externalapi
// Consensus maintains the current core state of the node
type Consensus interface {
BuildBlock(coinbaseData *DomainCoinbaseData, transactions []*DomainTransaction) (*DomainBlock, error)
ValidateAndInsertBlock(block *DomainBlock) (*BlockInsertionResult, error)
ValidateAndInsertBlock(block *DomainBlock) error
ValidateTransactionAndPopulateWithConsensusData(transaction *DomainTransaction) error

GetBlock(blockHash *DomainHash) (*DomainBlock, error)
@@ -13,11 +13,9 @@ type Consensus interface {
GetHashesBetween(lowHash, highHash *DomainHash) ([]*DomainHash, error)
GetMissingBlockBodyHashes(highHash *DomainHash) ([]*DomainHash, error)
GetPruningPointUTXOSet(expectedPruningPointHash *DomainHash) ([]byte, error)
ValidateAndInsertPruningPoint(newPruningPoint *DomainBlock, serializedUTXOSet []byte) error
SetPruningPointUTXOSet(serializedUTXOSet []byte) error
GetVirtualSelectedParent() (*DomainBlock, error)
CreateBlockLocator(lowHash, highHash *DomainHash, limit uint32) (BlockLocator, error)
CreateBlockLocator(lowHash, highHash *DomainHash) (BlockLocator, error)
FindNextBlockLocatorBoundaries(blockLocator BlockLocator) (lowHash, highHash *DomainHash, err error)
GetSyncInfo() (*SyncInfo, error)
Tips() ([]*DomainHash, error)
GetVirtualInfo() (*VirtualInfo, error)
}

@@ -1,12 +0,0 @@
package externalapi

// BlockInsertionResult is auxiliary data returned from ValidateAndInsertBlock
type BlockInsertionResult struct {
SelectedParentChainChanges *SelectedParentChainChanges
}

// SelectedParentChainChanges is the set of changes made to the selected parent chain
type SelectedParentChainChanges struct {
Added []*DomainHash
Removed []*DomainHash
}
@@ -1,8 +1,40 @@
package externalapi

import "fmt"

// Each of the following represent one of the possible sync
// states of the consensus
const (
SyncStateRelay SyncState = iota
SyncStateMissingGenesis
SyncStateHeadersFirst
SyncStateMissingUTXOSet
SyncStateMissingBlockBodies
)

// SyncState represents the current sync state of the consensus
type SyncState uint8

func (s SyncState) String() string {
switch s {
case SyncStateRelay:
return "SyncStateRelay"
case SyncStateMissingGenesis:
return "SyncStateMissingGenesis"
case SyncStateHeadersFirst:
return "SyncStateHeadersFirst"
case SyncStateMissingUTXOSet:
return "SyncStateMissingUTXOSet"
case SyncStateMissingBlockBodies:
return "SyncStateMissingBlockBodies"
}

return fmt.Sprintf("<unknown state (%d)>", s)
}

// SyncInfo holds info about the current sync state of the consensus
type SyncInfo struct {
IsAwaitingUTXOSet bool
State SyncState
IBDRootUTXOBlockHash *DomainHash
HeaderCount uint64
BlockCount uint64

@@ -19,7 +19,7 @@ type DomainTransaction struct {
Mass uint64

// ID is a field that is used to cache the transaction ID.
// Always use consensushashing.TransactionID instead of accessing this field directly
// Always use consensusserialization.TransactionID instead of accessing this field directly
ID *DomainTransactionID
}

@@ -62,7 +62,7 @@ type DomainTransactionInput struct {
SignatureScript []byte
Sequence uint64

UTXOEntry UTXOEntry
UTXOEntry *UTXOEntry
}

// Clone returns a clone of DomainTransactionInput
@@ -78,7 +78,7 @@ func (input *DomainTransactionInput) Clone() *DomainTransactionInput {
PreviousOutpoint: *input.PreviousOutpoint.Clone(),
SignatureScript: signatureScriptClone,
Sequence: input.Sequence,
UTXOEntry: input.UTXOEntry,
UTXOEntry: input.UTXOEntry.Clone(),
}
}

@@ -4,9 +4,36 @@ package externalapi
// set such as whether or not it was contained in a coinbase tx, the blue
// score of the block that accepts the tx, its public key script, and how
// much it pays.
type UTXOEntry interface {
Amount() uint64
ScriptPublicKey() []byte // The public key script for the output.
BlockBlueScore() uint64 // Blue score of the block accepting the tx.
IsCoinbase() bool
type UTXOEntry struct {
Amount uint64
ScriptPublicKey []byte // The public key script for the output.
BlockBlueScore uint64 // Blue score of the block accepting the tx.
IsCoinbase bool
}

// Clone returns a clone of UTXOEntry
func (entry *UTXOEntry) Clone() *UTXOEntry {
if entry == nil {
return nil
}

scriptPublicKeyClone := make([]byte, len(entry.ScriptPublicKey))
copy(scriptPublicKeyClone, entry.ScriptPublicKey)

return &UTXOEntry{
Amount: entry.Amount,
ScriptPublicKey: scriptPublicKeyClone,
BlockBlueScore: entry.BlockBlueScore,
IsCoinbase: entry.IsCoinbase,
}
}

// NewUTXOEntry creates a new utxoEntry representing the given txOut
func NewUTXOEntry(amount uint64, scriptPubKey []byte, isCoinbase bool, blockBlueScore uint64) *UTXOEntry {
return &UTXOEntry{
Amount: amount,
ScriptPublicKey: scriptPubKey,
BlockBlueScore: blockBlueScore,
IsCoinbase: isCoinbase,
}
}

@@ -1,8 +0,0 @@
package externalapi

// VirtualInfo represents information about the virtual block needed by external components
type VirtualInfo struct {
ParentHashes []*DomainHash
Bits uint32
PastMedianTime int64
}
@@ -1,19 +1,34 @@
package model

import (
"math/big"

"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// BlockGHOSTDAGData represents GHOSTDAG data for some block
type BlockGHOSTDAGData interface {
BlueScore() uint64
BlueWork() *big.Int
SelectedParent() *externalapi.DomainHash
MergeSetBlues() []*externalapi.DomainHash
MergeSetReds() []*externalapi.DomainHash
BluesAnticoneSizes() map[externalapi.DomainHash]KType
type BlockGHOSTDAGData struct {
BlueScore uint64
SelectedParent *externalapi.DomainHash
MergeSetBlues []*externalapi.DomainHash
MergeSetReds []*externalapi.DomainHash
BluesAnticoneSizes map[externalapi.DomainHash]KType
}

// Clone returns a clone of BlockGHOSTDAGData
func (bgd *BlockGHOSTDAGData) Clone() *BlockGHOSTDAGData {
if bgd == nil {
return nil
}

bluesAnticoneSizesClone := make(map[externalapi.DomainHash]KType, len(bgd.BluesAnticoneSizes))
for hash, size := range bgd.BluesAnticoneSizes {
bluesAnticoneSizesClone[hash] = size
}

return &BlockGHOSTDAGData{
BlueScore: bgd.BlueScore,
SelectedParent: bgd.SelectedParent.Clone(),
MergeSetBlues: externalapi.CloneHashes(bgd.MergeSetBlues),
MergeSetReds: externalapi.CloneHashes(bgd.MergeSetReds),
BluesAnticoneSizes: bluesAnticoneSizesClone,
}
}

// KType defines the size of GHOSTDAG consensus algorithm K parameter.

@@ -7,9 +7,9 @@ type ConsensusStateStore interface {
Store
IsStaged() bool

StageVirtualUTXODiff(virtualUTXODiff UTXODiff) error
StageVirtualUTXODiff(virtualUTXODiff *UTXODiff) error
StageVirtualUTXOSet(virtualUTXOSetIterator ReadOnlyUTXOSetIterator) error
UTXOByOutpoint(dbContext DBReader, outpoint *externalapi.DomainOutpoint) (externalapi.UTXOEntry, error)
UTXOByOutpoint(dbContext DBReader, outpoint *externalapi.DomainOutpoint) (*externalapi.UTXOEntry, error)
HasUTXOByOutpoint(dbContext DBReader, outpoint *externalapi.DomainOutpoint) (bool, error)
VirtualUTXOSetIterator(dbContext DBReader) (ReadOnlyUTXOSetIterator, error)

@@ -1,13 +0,0 @@
package model

import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

// FinalityStore represents a store for finality data
type FinalityStore interface {
Store
IsStaged() bool
StageFinalityPoint(blockHash *externalapi.DomainHash, finalityPointHash *externalapi.DomainHash)
FinalityPoint(dbContext DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
}
@@ -5,7 +5,7 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// GHOSTDAGDataStore represents a store of BlockGHOSTDAGData
type GHOSTDAGDataStore interface {
Store
Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData BlockGHOSTDAGData)
Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *BlockGHOSTDAGData)
IsStaged() bool
Get(dbContext DBReader, blockHash *externalapi.DomainHash) (BlockGHOSTDAGData, error)
Get(dbContext DBReader, blockHash *externalapi.DomainHash) (*BlockGHOSTDAGData, error)
}

@@ -2,11 +2,11 @@ package model

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// HeaderSelectedTipStore represents a store of the headers selected tip
type HeaderSelectedTipStore interface {
// HeaderTipsStore represents a store of the header tips
type HeaderTipsStore interface {
Store
Stage(selectedTip *externalapi.DomainHash)
Stage(tips []*externalapi.DomainHash)
IsStaged() bool
HeadersSelectedTip(dbContext DBReader) (*externalapi.DomainHash, error)
Has(dbContext DBReader) (bool, error)
Tips(dbContext DBReader) ([]*externalapi.DomainHash, error)
HasTips(dbContext DBReader) (bool, error)
}

@@ -5,9 +5,9 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// UTXODiffStore represents a store of UTXODiffs
type UTXODiffStore interface {
Store
Stage(blockHash *externalapi.DomainHash, utxoDiff UTXODiff, utxoDiffChild *externalapi.DomainHash)
Stage(blockHash *externalapi.DomainHash, utxoDiff *UTXODiff, utxoDiffChild *externalapi.DomainHash)
IsStaged() bool
UTXODiff(dbContext DBReader, blockHash *externalapi.DomainHash) (UTXODiff, error)
UTXODiff(dbContext DBReader, blockHash *externalapi.DomainHash) (*UTXODiff, error)
UTXODiffChild(dbContext DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
HasUTXODiffChild(dbContext DBReader, blockHash *externalapi.DomainHash) (bool, error)
Delete(blockHash *externalapi.DomainHash)

@@ -6,3 +6,13 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type BlockBuilder interface {
BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, error)
}

// TestBlockBuilder adds to the main BlockBuilder methods required by tests
type TestBlockBuilder interface {
BlockBuilder

// BuildBlockWithParents builds a block with provided parents, coinbaseData and transactions,
// and returns the block together with its past UTXO-diff from the virtual.
BuildBlockWithParents(parentHashes []*externalapi.DomainHash, coinbaseData *externalapi.DomainCoinbaseData,
transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, *UTXODiff, error)
}

@@ -4,6 +4,5 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// BlockProcessor is responsible for processing incoming blocks
type BlockProcessor interface {
ValidateAndInsertBlock(block *externalapi.DomainBlock) (*externalapi.BlockInsertionResult, error)
ValidateAndInsertPruningPoint(newPruningPoint *externalapi.DomainBlock, serializedUTXOSet []byte) error
ValidateAndInsertBlock(block *externalapi.DomainBlock) error
}

@@ -1,8 +1,6 @@
package model

import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// BlockValidator exposes a set of validation classes, after which
// it's possible to determine whether a block is valid
@@ -10,6 +8,6 @@ type BlockValidator interface {
ValidateHeaderInIsolation(blockHash *externalapi.DomainHash) error
ValidateBodyInIsolation(blockHash *externalapi.DomainHash) error
ValidateHeaderInContext(blockHash *externalapi.DomainHash) error
ValidateBodyInContext(blockHash *externalapi.DomainHash, isPruningPoint bool) error
ValidatePruningPointViolationAndProofOfWorkAndDifficulty(blockHash *externalapi.DomainHash) error
ValidateBodyInContext(blockHash *externalapi.DomainHash) error
ValidateProofOfWorkAndDifficulty(blockHash *externalapi.DomainHash) error
}

@@ -7,6 +7,4 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type CoinbaseManager interface {
ExpectedCoinbaseTransaction(blockHash *externalapi.DomainHash,
coinbaseData *externalapi.DomainCoinbaseData) (*externalapi.DomainTransaction, error)
ExtractCoinbaseDataAndBlueScore(coinbaseTx *externalapi.DomainTransaction) (blueScore uint64,
coinbaseData *externalapi.DomainCoinbaseData, err error)
}

@@ -4,9 +4,19 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// ConsensusStateManager manages the node's consensus state
type ConsensusStateManager interface {
AddBlock(blockHash *externalapi.DomainHash) (*externalapi.SelectedParentChainChanges, error)
AddBlockToVirtual(blockHash *externalapi.DomainHash) error
PopulateTransactionWithUTXOEntries(transaction *externalapi.DomainTransaction) error
UpdatePruningPoint(newPruningPoint *externalapi.DomainBlock, serializedUTXOSet []byte) error
SetPruningPointUTXOSet(serializedUTXOSet []byte) error
RestorePastUTXOSetIterator(blockHash *externalapi.DomainHash) (ReadOnlyUTXOSetIterator, error)
CalculatePastUTXOAndAcceptanceData(blockHash *externalapi.DomainHash) (UTXODiff, AcceptanceData, Multiset, error)
HeaderTipsPruningPoint() (*externalapi.DomainHash, error)
CalculatePastUTXOAndAcceptanceData(blockHash *externalapi.DomainHash) (*UTXODiff, AcceptanceData, Multiset, error)
}

// TestConsensusStateManager adds to the main ConsensusStateManager methods required by tests
type TestConsensusStateManager interface {
ConsensusStateManager
AddUTXOToMultiset(multiset Multiset, entry *externalapi.UTXOEntry,
outpoint *externalapi.DomainOutpoint) error
ResolveBlockStatus(blockHash *externalapi.DomainHash) (externalapi.BlockStatus, error)
VirtualFinalityPoint() (*externalapi.DomainHash, error)
}

@@ -13,7 +13,6 @@ type DAGTopologyManager interface {
IsDescendantOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
IsAncestorOfAny(blockHash *externalapi.DomainHash, potentialDescendants []*externalapi.DomainHash) (bool, error)
IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
ChildInSelectedParentChainOf(context, highHash *externalapi.DomainHash) (*externalapi.DomainHash, error)

SetParents(blockHash *externalapi.DomainHash, parentHashes []*externalapi.DomainHash) error
}

@@ -10,7 +10,7 @@ type DAGTraversalManager interface {
SelectedParentIterator(highHash *externalapi.DomainHash) BlockIterator
SelectedChildIterator(highHash, lowHash *externalapi.DomainHash) (BlockIterator, error)
AnticoneFromContext(context, lowHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error)
BlueWindow(highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.DomainHash, error)
BlueWindow(highHash *externalapi.DomainHash, windowSize uint64) ([]*externalapi.DomainHash, error)
NewDownHeap() BlockHeap
NewUpHeap() BlockHeap
}

@@ -1,9 +0,0 @@
package model

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// FinalityManager provides method to validate that a block does not violate finality
type FinalityManager interface {
VirtualFinalityPoint() (*externalapi.DomainHash, error)
FinalityPoint(blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error)
}
@@ -6,6 +6,6 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
type GHOSTDAGManager interface {
GHOSTDAG(blockHash *externalapi.DomainHash) error
ChooseSelectedParent(blockHashes ...*externalapi.DomainHash) (*externalapi.DomainHash, error)
Less(blockHashA *externalapi.DomainHash, ghostdagDataA BlockGHOSTDAGData,
blockHashB *externalapi.DomainHash, ghostdagDataB BlockGHOSTDAGData) bool
Less(blockHashA *externalapi.DomainHash, ghostdagDataA *BlockGHOSTDAGData,
blockHashB *externalapi.DomainHash, ghostdagDataB *BlockGHOSTDAGData) bool
}

@@ -2,7 +2,8 @@ package model

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// HeadersSelectedTipManager manages the state of the headers selected tip
type HeadersSelectedTipManager interface {
// HeaderTipsManager manages the state of the header tips
type HeaderTipsManager interface {
AddHeaderTip(hash *externalapi.DomainHash) error
SelectedTip() (*externalapi.DomainHash, error)
}

@@ -4,6 +4,6 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// PruningManager resolves and manages the current pruning point
type PruningManager interface {
UpdatePruningPointByVirtual() error
CalculatePruningPointByHeaderSelectedTip() (*externalapi.DomainHash, error)
FindNextPruningPoint() error
PruningPoint() (*externalapi.DomainHash, error)
}
Some files were not shown because too many files have changed in this diff.