Compare commits


2 Commits

Author SHA1 Message Date
Kirill
b88e34fd84 [NOD-1313] Refactor AddressManager (#918)
* [NOD-1313] Refactor AddressManager.

* [NOD-1313] Remove old tests. Minor improvements, fixes.

* [NOD-1313] After merge fixes. Fix import cycle.

* [NOD-1313] Integration tests fixes.

* [NOD-1313] Allocate new slice for the returned key.

* [NOD-1313] AddressManager improvements and fixes.

* Move local and banned addresses to separate lists.
* Move AddressManager config to a separate file.
* Add LocalAddressManager.
* Remove redundant KnownAddress structure.
* Restore local addresses functionality.
* Call initListeners from the LocalAddressManager.
* AddressManager minor improvements and fixes.

* [NOD-1313] Minor fixes.

* [NOD-1313] Implement HandleGetPeerAddresses. Refactoring.

* [NOD-1313] After-merge fixes.

* [NOD-1313] Minor improvements.

* AddressManager: added BannedAddresses() method.
* AddressManager: HandleGetPeerAddresses() add banned addresses
  separately.
* AddressManager: remove addressEntry redundant struct.
* ConnectionManager: checkOutgoingConnections() minor improvements and
  fixes.
* Minor refactoring.
* Minor fixes.

* [NOD-1313] GetPeerAddresses RPC message update

* GetPeerAddresses RPC: add BannedAddresses in a separate field.
* Update protobuf.
2020-10-08 17:05:47 +03:00
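The last two bullets describe moving banned addresses into a field of their own in the GetPeerAddresses response rather than mixing them into the general address list. A minimal Go sketch of that shape; the type and field names below are illustrative only and are not taken from kaspad's actual appmessage or protobuf definitions:

```go
package appmessage

// Sketch only: names are hypothetical, chosen to illustrate the split
// between regular and banned peer addresses described in the commit above.
type PeerAddressSketch struct {
	IP   string
	Port uint16
}

type GetPeerAddressesResponseSketch struct {
	Addresses       []PeerAddressSketch // regular known addresses
	BannedAddresses []PeerAddressSketch // banned addresses, returned in their own field
}
```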
Ori Newman
689098082f [NOD-1444] Implement getHeaders RPC command (#944)
* [NOD-1444] Implement getHeaders RPC command

* [NOD-1444] Fix tests and comments

* [NOD-1444] Fix error message

* [NOD-1444] Make GetHeaders propagate header serialization errors

* [NOD-1444] RLock the dag on GetHeaders

* [NOD-1444] Change the error field number to 1000
2020-10-06 12:40:32 +03:00
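The "RLock the dag on GetHeaders" bullet refers to a read-lock-while-reading pattern. A self-contained Go sketch of that pattern, with hypothetical names that are not kaspad's actual types:

```go
package rpcsketch

import "sync"

// dagSketch is a stand-in for a DAG guarded by a read-write mutex.
// Sketch only: kaspad's real DAG and handler types are not shown here.
type dagSketch struct {
	lock    sync.RWMutex
	headers []string
}

// GetHeaders holds the read lock for the whole collection so concurrent
// writers cannot mutate the DAG mid-request, then returns a copy.
func (d *dagSketch) GetHeaders() []string {
	d.lock.RLock()
	defer d.lock.RUnlock()
	out := make([]string, len(d.headers))
	copy(out, d.headers)
	return out
}
```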
901 changed files with 48351 additions and 94711 deletions


@@ -1,9 +0,0 @@
coverage:
status:
patch: off
project:
default:
informational: true
ignore:
- "**/*.pb.go" # Ignore all auto generated protobuf structures.


@@ -1,196 +0,0 @@
<#
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
# Source: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
.SYNOPSIS
Configure Pagefile on Windows machine
.NOTES
Author: Aleksandr Chebotov
.EXAMPLE
SetPageFileSize.ps1 -MinimumSize 4GB -MaximumSize 8GB -DiskRoot "D:"
#>
param(
[System.UInt64] $MinimumSize = 8gb ,
[System.UInt64] $MaximumSize = 8gb ,
[System.String] $DiskRoot = "D:"
)
# https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/NativeMethods.cs,619688d876febbe1
# https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/mm/modwrite/create.htm
# https://referencesource.microsoft.com/#mscorlib/microsoft/win32/safehandles/safefilehandle.cs,9b08210f3be75520
# https://referencesource.microsoft.com/#mscorlib/system/security/principal/tokenaccesslevels.cs,6eda91f498a38586
# https://www.autoitscript.com/forum/topic/117993-api-ntcreatepagingfile/
$source = @'
using System;
using System.ComponentModel;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Security.Principal;
using System.Text;
using Microsoft.Win32;
using Microsoft.Win32.SafeHandles;
namespace Util
{
class NativeMethods
{
[StructLayout(LayoutKind.Sequential)]
internal struct LUID
{
internal uint LowPart;
internal uint HighPart;
}
[StructLayout(LayoutKind.Sequential)]
internal struct LUID_AND_ATTRIBUTES
{
internal LUID Luid;
internal uint Attributes;
}
[StructLayout(LayoutKind.Sequential)]
internal struct TOKEN_PRIVILEGE
{
internal uint PrivilegeCount;
internal LUID_AND_ATTRIBUTES Privilege;
internal static readonly uint Size = (uint)Marshal.SizeOf(typeof(TOKEN_PRIVILEGE));
}
[StructLayoutAttribute(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
internal struct UNICODE_STRING
{
internal UInt16 length;
internal UInt16 maximumLength;
internal string buffer;
}
[DllImport("kernel32.dll", SetLastError=true)]
internal static extern IntPtr LocalFree(IntPtr handle);
[DllImport("advapi32.dll", ExactSpelling = true, CharSet = CharSet.Unicode, SetLastError = true, PreserveSig = false)]
internal static extern bool LookupPrivilegeValueW(
[In] string lpSystemName,
[In] string lpName,
[Out] out LUID luid
);
[DllImport("advapi32.dll", SetLastError = true, PreserveSig = false)]
internal static extern bool AdjustTokenPrivileges(
[In] SafeCloseHandle tokenHandle,
[In] bool disableAllPrivileges,
[In] ref TOKEN_PRIVILEGE newState,
[In] uint bufferLength,
[Out] out TOKEN_PRIVILEGE previousState,
[Out] out uint returnLength
);
[DllImport("advapi32.dll", CharSet = CharSet.Auto, SetLastError = true, PreserveSig = false)]
internal static extern bool OpenProcessToken(
[In] IntPtr processToken,
[In] int desiredAccess,
[Out] out SafeCloseHandle tokenHandle
);
[DllImport("ntdll.dll", CharSet = CharSet.Unicode, SetLastError = true, CallingConvention = CallingConvention.StdCall)]
internal static extern Int32 NtCreatePagingFile(
[In] ref UNICODE_STRING pageFileName,
[In] ref Int64 minimumSize,
[In] ref Int64 maximumSize,
[In] UInt32 flags
);
[DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
internal static extern uint QueryDosDeviceW(
string lpDeviceName,
StringBuilder lpTargetPath,
int ucchMax
);
}
public sealed class SafeCloseHandle: SafeHandleZeroOrMinusOneIsInvalid
{
[DllImport("kernel32.dll", ExactSpelling = true, SetLastError = true)]
internal extern static bool CloseHandle(IntPtr handle);
private SafeCloseHandle() : base(true)
{
}
public SafeCloseHandle(IntPtr preexistingHandle, bool ownsHandle) : base(ownsHandle)
{
SetHandle(preexistingHandle);
}
override protected bool ReleaseHandle()
{
return CloseHandle(handle);
}
}
public class PageFile
{
public static void SetPageFileSize(long minimumValue, long maximumValue, string lpDeviceName)
{
SetPageFilePrivilege();
StringBuilder lpTargetPath = new StringBuilder(260);
UInt32 resultQueryDosDevice = NativeMethods.QueryDosDeviceW(lpDeviceName, lpTargetPath, lpTargetPath.Capacity);
if (resultQueryDosDevice == 0)
{
throw new Win32Exception(Marshal.GetLastWin32Error());
}
string pageFilePath = lpTargetPath.ToString() + "\\pagefile.sys";
NativeMethods.UNICODE_STRING pageFileName = new NativeMethods.UNICODE_STRING
{
length = (ushort)(pageFilePath.Length * 2),
maximumLength = (ushort)(2 * (pageFilePath.Length + 1)),
buffer = pageFilePath
};
Int32 resultNtCreatePagingFile = NativeMethods.NtCreatePagingFile(ref pageFileName, ref minimumValue, ref maximumValue, 0);
if (resultNtCreatePagingFile != 0)
{
throw new Win32Exception(Marshal.GetLastWin32Error());
}
Console.WriteLine("PageFile: {0} / {1} bytes for {2}", minimumValue, maximumValue, pageFilePath);
}
static void SetPageFilePrivilege()
{
const int SE_PRIVILEGE_ENABLED = 0x00000002;
const int AdjustPrivileges = 0x00000020;
const int Query = 0x00000008;
NativeMethods.LUID luid;
NativeMethods.LookupPrivilegeValueW(null, "SeCreatePagefilePrivilege", out luid);
SafeCloseHandle hToken;
NativeMethods.OpenProcessToken(
Process.GetCurrentProcess().Handle,
AdjustPrivileges | Query,
out hToken
);
NativeMethods.TOKEN_PRIVILEGE previousState;
NativeMethods.TOKEN_PRIVILEGE newState;
uint previousSize = 0;
newState.PrivilegeCount = 1;
newState.Privilege.Luid = luid;
newState.Privilege.Attributes = SE_PRIVILEGE_ENABLED;
NativeMethods.AdjustTokenPrivileges(hToken, false, ref newState, NativeMethods.TOKEN_PRIVILEGE.Size, out previousState, out previousSize);
}
}
}
'@
Add-Type -TypeDefinition $source
# Set SetPageFileSize
[Util.PageFile]::SetPageFileSize($minimumSize, $maximumSize, $diskRoot)


@@ -1,77 +0,0 @@
name: Build and Upload assets
on:
release:
types: [published]
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ ubuntu-latest, windows-latest, macos-latest ]
name: Building For ${{ matrix.os }}
steps:
- name: Fix windows CRLF
run: git config --global core.autocrlf false
- name: Check out code into the Go module directory
uses: actions/checkout@v2
# We need to increase the page size because the tests run out of memory on github CI windows.
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
- name: Increase page size on windows
if: runner.os == 'Windows'
shell: powershell
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Build on linux
if: runner.os == 'Linux'
# `-extldflags=-static` - means static link everything, `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
# `-s -w` strips the binary to produce smaller size binaries
run: |
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
zip -r "${archive}" ./bin/*
echo "archive=${archive}" >> $GITHUB_ENV
echo "asset_name=${asset_name}" >> $GITHUB_ENV
- name: Build on Windows
if: runner.os == 'Windows'
shell: bash
run: |
go build -v -ldflags="-s -w" -o bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
powershell "Compress-Archive bin/* \"${archive}\""
echo "archive=${archive}" >> $GITHUB_ENV
echo "asset_name=${asset_name}" >> $GITHUB_ENV
- name: Build on MacOS
if: runner.os == 'macOS'
run: |
go build -v -ldflags="-s -w" -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
zip -r "${archive}" ./bin/*
echo "archive=${archive}" >> $GITHUB_ENV
echo "asset_name=${asset_name}" >> $GITHUB_ENV
- name: Upload Release Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: "./${{ env.archive }}"
asset_name: "${{ env.asset_name }}"
asset_content_type: application/zip


@@ -1,49 +0,0 @@
name: Go-Race
on:
schedule:
- cron: "0 0 * * *"
workflow_dispatch:
jobs:
race_test:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
branch: [ master, latest ]
name: Race detection on ${{ matrix.branch }}
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.15
- name: Set scheduled branch name
shell: bash
if: github.event_name == 'schedule'
run: |
if [ "${{ matrix.branch }}" == "master" ]; then
echo "run_on=master" >> $GITHUB_ENV
fi
if [ "${{ matrix.branch }}" == "latest" ]; then
branch=$(git branch -r | grep 'v\([0-9]\+\.\)\([0-9]\+\.\)\([0-9]\+\)-dev' | sort -Vr | head -1 | xargs)
echo "run_on=${branch}" >> $GITHUB_ENV
fi
- name: Set manual branch name
shell: bash
if: github.event_name == 'workflow_dispatch'
run: echo "run_on=${{ github.ref }}" >> $GITHUB_ENV
- name: Test with race detector
shell: bash
run: |
git checkout "${{ env.run_on }}"
git status
go test -race ./...


@@ -1,70 +0,0 @@
name: Go
on:
push:
pull_request:
# edited - "title, body, or the base branch of the PR is modified"
# synchronize - "commit(s) pushed to the pull request"
types: [opened, synchronize, edited, reopened]
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ ubuntu-16.04, macos-10.15 ]
name: Testing on ${{ matrix.os }}
steps:
- name: Fix windows CRLF
run: git config --global core.autocrlf false
- name: Check out code into the Go module directory
uses: actions/checkout@v2
# We need to increase the page size because the tests run out of memory on github CI windows.
# Use the powershell script from this github action: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1
# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors
- name: Increase page size on windows
if: runner.os == 'Windows'
shell: powershell
run: powershell -command .\.github\workflows\SetPageFileSize.ps1
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
# Source: https://github.com/actions/cache/blob/main/examples.md#go---modules
- name: Go Cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Test
shell: bash
run: ./build_and_test.sh -v
coverage:
runs-on: ubuntu-20.04
name: Produce code coverage
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Create coverage file
run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...
- name: Upload coverage file
run: bash <(curl -s https://codecov.io/bash)

.gitignore (vendored, 18 changes)

@@ -13,21 +13,6 @@ kaspad.db
*.o
*.a
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Real binaries, build with `go build .`
kaspad
cmd/gencerts/gencerts
cmd/kaspactl/kaspactl
cmd/kaspaminer/kaspaminer
*.exe
*.exe~
# Output of the go coverage tool
*.out
# Folders
_obj
@@ -46,7 +31,8 @@ _cgo_export.*
_testmain.go
*.exe
# IDE
.idea
.vscode


@@ -1,19 +0,0 @@
# Contributing to Kaspad
Any contribution to Kaspad is very welcome.
## Getting started
If you want to start contributing to Kaspad and don't know where to start, you can pick an issue from
the [list](https://github.com/kaspanet/kaspad/issues).
If you want to make a big change it's better to discuss it first by opening an issue or talk about it in
[Discord](https://discord.gg/WmGhhzk) to avoid duplicate work.
## Pull Request process
Any pull request should be opened against the development branch of the target version. The development branch format is
as follows: `vx.y.z-dev`, for example: `v0.8.5-dev`.
All pull requests should pass the checks written in `build_and_test.sh`, so it's recommended to run this script before
submitting your PR.


@@ -9,16 +9,12 @@ Warning: This is pre-alpha software. There's no guarantee anything works.
Kaspad is the reference full node Kaspa implementation written in Go (golang).
This project is currently under active development and is in a pre-Alpha state.
Some things still don't work and APIs are far from finalized. The code is provided for reference only.
## What is kaspa
Kaspa is an attempt at a proof-of-work cryptocurrency with instant confirmations and sub-second block times. It is based on [the PHANTOM protocol](https://eprint.iacr.org/2018/104.pdf), a generalization of Nakamoto consensus.
## Requirements
Go 1.16 or later.
Latest version of [Go](http://golang.org) (currently 1.13).
## Installation
@@ -31,17 +27,23 @@ Go 1.16 or later.
```bash
$ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain and install kaspad including all dependencies:
```bash
$ git clone https://github.com/kaspanet/kaspad
$ cd kaspad
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ go install . ./cmd/...
```
- Kaspad (and utilities) should now be installed in `$(go env GOPATH)/bin`. If you did
- Kaspad (and utilities) should now be installed in `$GOPATH/bin`. If you did
not already add the bin directory to your system path during Go installation,
you are encouraged to do so now.
@@ -51,8 +53,10 @@ $ go install . ./cmd/...
Kaspad has several configuration options available to tweak how it runs, but all
of the basic operations work with zero configuration.
#### Linux/BSD/POSIX/Source
```bash
$ kaspad
$ ./kaspad
```
## Discord
@@ -65,8 +69,9 @@ is used for this project.
## Documentation
The [documentation](https://github.com/kaspanet/docs) is a work-in-progress
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
## License
Kaspad is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/).


@@ -7,20 +7,20 @@ import (
"runtime"
"time"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/os/execenv"
"github.com/kaspanet/kaspad/infrastructure/os/limits"
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/infrastructure/os/signal"
"github.com/kaspanet/kaspad/infrastructure/os/winservice"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/kaspanet/kaspad/version"
)
const leveldbCacheSizeMiB = 256
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/os/execenv"
"github.com/kaspanet/kaspad/infrastructure/os/limits"
"github.com/kaspanet/kaspad/infrastructure/os/winservice"
)
var desiredLimits = &limits.DesiredLimits{
FileLimitWant: 2048,
@@ -46,10 +46,9 @@ func StartApp() error {
// initializes logging and configures it accordingly.
cfg, err := config.LoadConfig()
if err != nil {
fmt.Fprintln(os.Stderr, err)
fmt.Fprint(os.Stderr, err)
return err
}
defer logger.BackendLog.Close()
defer panics.HandlePanic(log, "MAIN", nil)
app := &kaspadApp{cfg: cfg}
@@ -124,6 +123,16 @@ func (app *kaspadApp) main(startedChan chan<- struct{}) error {
return nil
}
// Drop indexes and exit if requested.
if app.cfg.DropAcceptanceIndex {
if err := indexers.DropAcceptanceIndex(databaseContext); err != nil {
log.Errorf("%s", err)
return err
}
return nil
}
// Create componentManager and start it.
componentManager, err := NewComponentManager(app.cfg, databaseContext, interrupt)
if err != nil {
@@ -179,8 +188,8 @@ func removeDatabase(cfg *config.Config) error {
return os.RemoveAll(dbPath)
}
func openDB(cfg *config.Config) (database.Database, error) {
func openDB(cfg *config.Config) (*dbaccess.DatabaseContext, error) {
dbPath := databasePath(cfg)
log.Infof("Loading database from '%s'", dbPath)
return ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB)
return dbaccess.New(dbPath)
}


@@ -0,0 +1,431 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"compress/bzip2"
"io/ioutil"
"math"
"os"
"testing"
"github.com/kaspanet/kaspad/util/daghash"
)
// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for
// the main network and test network.
var genesisCoinbaseTxIns = []*TxIn{
{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, 0x45, /* |.......E| */
0x54, 0x68, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, /* |The Time| */
0x73, 0x20, 0x30, 0x33, 0x2f, 0x4a, 0x61, 0x6e, /* |s 03/Jan| */
0x2f, 0x32, 0x30, 0x30, 0x39, 0x20, 0x43, 0x68, /* |/2009 Ch| */
0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x6f, 0x72, /* |ancellor| */
0x20, 0x6f, 0x6e, 0x20, 0x62, 0x72, 0x69, 0x6e, /* | on brin| */
0x6b, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, /* |k of sec|*/
0x6f, 0x6e, 0x64, 0x20, 0x62, 0x61, 0x69, 0x6c, /* |ond bail| */
0x6f, 0x75, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, /* |out for |*/
0x62, 0x61, 0x6e, 0x6b, 0x73, /* |banks| */
},
Sequence: math.MaxUint64,
},
}
var genesisCoinbaseTxOuts = []*TxOut{
{
Value: 0x12a05f200,
ScriptPubKey: []byte{
0x41, 0x04, 0x67, 0x8a, 0xfd, 0xb0, 0xfe, 0x55, /* |A.g....U| */
0x48, 0x27, 0x19, 0x67, 0xf1, 0xa6, 0x71, 0x30, /* |H'.g..q0| */
0xb7, 0x10, 0x5c, 0xd6, 0xa8, 0x28, 0xe0, 0x39, /* |..\..(.9| */
0x09, 0xa6, 0x79, 0x62, 0xe0, 0xea, 0x1f, 0x61, /* |..yb...a| */
0xde, 0xb6, 0x49, 0xf6, 0xbc, 0x3f, 0x4c, 0xef, /* |..I..?L.| */
0x38, 0xc4, 0xf3, 0x55, 0x04, 0xe5, 0x1e, 0xc1, /* |8..U....| */
0x12, 0xde, 0x5c, 0x38, 0x4d, 0xf7, 0xba, 0x0b, /* |..\8M...| */
0x8d, 0x57, 0x8a, 0x4c, 0x70, 0x2b, 0x6b, 0xf1, /* |.W.Lp+k.| */
0x1d, 0x5f, 0xac, /* |._.| */
},
},
}
var genesisCoinbaseTx = NewNativeMsgTx(1, genesisCoinbaseTxIns, genesisCoinbaseTxOuts)
// BenchmarkWriteVarInt1 performs a benchmark on how long it takes to write
// a single byte variable length integer.
func BenchmarkWriteVarInt1(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 1)
}
}
// BenchmarkWriteVarInt3 performs a benchmark on how long it takes to write
// a three byte variable length integer.
func BenchmarkWriteVarInt3(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 65535)
}
}
// BenchmarkWriteVarInt5 performs a benchmark on how long it takes to write
// a five byte variable length integer.
func BenchmarkWriteVarInt5(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 4294967295)
}
}
// BenchmarkWriteVarInt9 performs a benchmark on how long it takes to write
// a nine byte variable length integer.
func BenchmarkWriteVarInt9(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarInt(ioutil.Discard, 18446744073709551615)
}
}
// BenchmarkReadVarInt1 performs a benchmark on how long it takes to read
// a single byte variable length integer.
func BenchmarkReadVarInt1(b *testing.B) {
buf := []byte{0x01}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarInt3 performs a benchmark on how long it takes to read
// a three byte variable length integer.
func BenchmarkReadVarInt3(b *testing.B) {
buf := []byte{0x0fd, 0xff, 0xff}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarInt5 performs a benchmark on how long it takes to read
// a five byte variable length integer.
func BenchmarkReadVarInt5(b *testing.B) {
buf := []byte{0xfe, 0xff, 0xff, 0xff, 0xff}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarInt9 performs a benchmark on how long it takes to read
// a nine byte variable length integer.
func BenchmarkReadVarInt9(b *testing.B) {
buf := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarInt(r)
}
}
// BenchmarkReadVarStr4 performs a benchmark on how long it takes to read a
// four byte variable length string.
func BenchmarkReadVarStr4(b *testing.B) {
buf := []byte{0x04, 't', 'e', 's', 't'}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarString(r, 0)
}
}
// BenchmarkReadVarStr10 performs a benchmark on how long it takes to read a
// ten byte variable length string.
func BenchmarkReadVarStr10(b *testing.B) {
buf := []byte{0x0a, 't', 'e', 's', 't', '0', '1', '2', '3', '4', '5'}
r := bytes.NewReader(buf)
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
ReadVarString(r, 0)
}
}
// BenchmarkWriteVarStr4 performs a benchmark on how long it takes to write a
// four byte variable length string.
func BenchmarkWriteVarStr4(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarString(ioutil.Discard, "test")
}
}
// BenchmarkWriteVarStr10 performs a benchmark on how long it takes to write a
// ten byte variable length string.
func BenchmarkWriteVarStr10(b *testing.B) {
for i := 0; i < b.N; i++ {
WriteVarString(ioutil.Discard, "test012345")
}
}
// BenchmarkReadOutpoint performs a benchmark on how long it takes to read a
// transaction outpoint.
func BenchmarkReadOutpoint(b *testing.B) {
buf := []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
}
r := bytes.NewReader(buf)
var op Outpoint
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readOutpoint(r, 0, 0, &op)
}
}
// BenchmarkWriteOutpoint performs a benchmark on how long it takes to write a
// transaction outpoint.
func BenchmarkWriteOutpoint(b *testing.B) {
op := &Outpoint{
TxID: daghash.TxID{},
Index: 0,
}
for i := 0; i < b.N; i++ {
writeOutpoint(ioutil.Discard, 0, 0, op)
}
}
// BenchmarkReadTxOut performs a benchmark on how long it takes to read a
// transaction output.
func BenchmarkReadTxOut(b *testing.B) {
buf := []byte{
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
}
r := bytes.NewReader(buf)
var txOut TxOut
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readTxOut(r, 0, 0, &txOut)
scriptPool.Return(txOut.ScriptPubKey)
}
}
// BenchmarkWriteTxOut performs a benchmark on how long it takes to write
// a transaction output.
func BenchmarkWriteTxOut(b *testing.B) {
txOut := blockOne.Transactions[0].TxOut[0]
for i := 0; i < b.N; i++ {
WriteTxOut(ioutil.Discard, 0, 0, txOut)
}
}
// BenchmarkReadTxIn performs a benchmark on how long it takes to read a
// transaction input.
func BenchmarkReadTxIn(b *testing.B) {
buf := []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x07, // Varint for length of signature script
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
}
r := bytes.NewReader(buf)
var txIn TxIn
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readTxIn(r, 0, 0, &txIn)
scriptPool.Return(txIn.SignatureScript)
}
}
// BenchmarkWriteTxIn performs a benchmark on how long it takes to write
// a transaction input.
func BenchmarkWriteTxIn(b *testing.B) {
txIn := blockOne.Transactions[0].TxIn[0]
for i := 0; i < b.N; i++ {
writeTxIn(ioutil.Discard, 0, 0, txIn, txEncodingFull)
}
}
// BenchmarkDeserializeTx performs a benchmark on how long it takes to
// deserialize a small transaction.
func BenchmarkDeserializeTxSmall(b *testing.B) {
buf := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x07, // Varint for length of signature script
0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x01, // Varint for number of output transactions
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
}
r := bytes.NewReader(buf)
var tx MsgTx
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
tx.Deserialize(r)
}
}
// BenchmarkDeserializeTxLarge performs a benchmark on how long it takes to
// deserialize a very large transaction.
func BenchmarkDeserializeTxLarge(b *testing.B) {
fi, err := os.Open("testdata/megatx.bin.bz2")
if err != nil {
b.Fatalf("Failed to read transaction data: %v", err)
}
defer fi.Close()
buf, err := ioutil.ReadAll(bzip2.NewReader(fi))
if err != nil {
b.Fatalf("Failed to read transaction data: %v", err)
}
r := bytes.NewReader(buf)
var tx MsgTx
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
tx.Deserialize(r)
}
}
// BenchmarkSerializeTx performs a benchmark on how long it takes to serialize
// a transaction.
func BenchmarkSerializeTx(b *testing.B) {
tx := blockOne.Transactions[0]
for i := 0; i < b.N; i++ {
tx.Serialize(ioutil.Discard)
}
}
// BenchmarkReadBlockHeader performs a benchmark on how long it takes to
// deserialize a block header.
func BenchmarkReadBlockHeader(b *testing.B) {
buf := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock
0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2,
0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot
0x29, 0xab, 0x5f, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x00, // TxnCount Varint
}
r := bytes.NewReader(buf)
var header BlockHeader
for i := 0; i < b.N; i++ {
r.Seek(0, 0)
readBlockHeader(r, 0, &header)
}
}
// BenchmarkWriteBlockHeader performs a benchmark on how long it takes to
// serialize a block header.
func BenchmarkWriteBlockHeader(b *testing.B) {
header := blockOne.Header
for i := 0; i < b.N; i++ {
writeBlockHeader(ioutil.Discard, 0, &header)
}
}
// BenchmarkTxHash performs a benchmark on how long it takes to hash a
// transaction.
func BenchmarkTxHash(b *testing.B) {
for i := 0; i < b.N; i++ {
genesisCoinbaseTx.TxHash()
}
}
// BenchmarkDoubleHashB performs a benchmark on how long it takes to perform a
// double hash returning a byte slice.
func BenchmarkDoubleHashB(b *testing.B) {
var buf bytes.Buffer
if err := genesisCoinbaseTx.Serialize(&buf); err != nil {
b.Errorf("Serialize: unexpected error: %v", err)
return
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = daghash.DoubleHashB(txBytes)
}
}
// BenchmarkDoubleHashH performs a benchmark on how long it takes to perform
// a double hash returning a daghash.Hash.
func BenchmarkDoubleHashH(b *testing.B) {
var buf bytes.Buffer
if err := genesisCoinbaseTx.Serialize(&buf); err != nil {
b.Errorf("Serialize: unexpected error: %v", err)
return
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = daghash.DoubleHashH(txBytes)
}
}
// BenchmarkDoubleHashWriter performs a benchmark on how long it takes to perform
// a double hash via the writer returning a daghash.Hash.
func BenchmarkDoubleHashWriter(b *testing.B) {
var buf bytes.Buffer
err := genesisCoinbaseTx.Serialize(&buf)
if err != nil {
b.Fatalf("Serialize: unexpected error: %+v", err)
}
txBytes := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
writer := daghash.NewDoubleHashWriter()
_, _ = writer.Write(txBytes)
writer.Finalize()
}
}


@@ -5,12 +5,34 @@
package appmessage
import (
"encoding/binary"
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
)
// MaxVarIntPayload is the maximum payload size for a variable length integer.
const MaxVarIntPayload = 9
// MaxInvPerMsg is the maximum number of inventory vectors that can be in any type of kaspa inv message.
const MaxInvPerMsg = 1 << 17
var (
// littleEndian is a convenience variable since binary.LittleEndian is
// quite long.
littleEndian = binary.LittleEndian
// bigEndian is a convenience variable since binary.BigEndian is quite
// long.
bigEndian = binary.BigEndian
)
// errNonCanonicalVarInt is the common format string used for non-canonically
// encoded variable length integer errors.
var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
@@ -18,3 +40,473 @@ var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " +
// errNoEncodingForType signifies that there's no encoding for the given type.
var errNoEncodingForType = errors.New("there's no encoding for this type")
// int64Time represents a unix timestamp with milliseconds precision encoded with
// an int64. It is used as a way to signal the readElement function how to decode
// a timestamp into a Go mstime.Time since it is otherwise ambiguous.
type int64Time mstime.Time
// ReadElement reads the next sequence of bytes from r using little endian
// depending on the concrete type of element pointed to.
func ReadElement(r io.Reader, element interface{}) error {
// Attempt to read the element based on the concrete type via fast
// type assertions first.
switch e := element.(type) {
case *int32:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = int32(rv)
return nil
case *uint32:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = rv
return nil
case *int64:
rv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = int64(rv)
return nil
case *uint64:
rv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = rv
return nil
case *uint8:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
*e = rv
return nil
case *bool:
rv, err := binaryserializer.Uint8(r)
if err != nil {
return err
}
if rv == 0x00 {
*e = false
} else {
*e = true
}
return nil
// Unix timestamp encoded as an int64.
case *int64Time:
rv, err := binaryserializer.Uint64(r, binary.LittleEndian)
if err != nil {
return err
}
*e = int64Time(mstime.UnixMilliseconds(int64(rv)))
return nil
// Message header checksum.
case *[4]byte:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
// Message header command.
case *MessageCommand:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = MessageCommand(rv)
return nil
// IP address.
case *[16]byte:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *daghash.Hash:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *id.ID:
return e.Deserialize(r)
case *subnetworkid.SubnetworkID:
_, err := io.ReadFull(r, e[:])
if err != nil {
return err
}
return nil
case *ServiceFlag:
rv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return err
}
*e = ServiceFlag(rv)
return nil
case *KaspaNet:
rv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
*e = KaspaNet(rv)
return nil
}
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to read type %T", element)
}
// readElements reads multiple items from r. It is equivalent to multiple
// calls to readElement.
func readElements(r io.Reader, elements ...interface{}) error {
for _, element := range elements {
err := ReadElement(r, element)
if err != nil {
return err
}
}
return nil
}
// WriteElement writes the little endian representation of element to w.
func WriteElement(w io.Writer, element interface{}) error {
// Attempt to write the element based on the concrete type via fast
// type assertions first.
switch e := element.(type) {
case int32:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
case uint32:
err := binaryserializer.PutUint32(w, littleEndian, e)
if err != nil {
return err
}
return nil
case int64:
err := binaryserializer.PutUint64(w, littleEndian, uint64(e))
if err != nil {
return err
}
return nil
case uint64:
err := binaryserializer.PutUint64(w, littleEndian, e)
if err != nil {
return err
}
return nil
case uint8:
err := binaryserializer.PutUint8(w, e)
if err != nil {
return err
}
return nil
case bool:
var err error
if e {
err = binaryserializer.PutUint8(w, 0x01)
} else {
err = binaryserializer.PutUint8(w, 0x00)
}
if err != nil {
return err
}
return nil
// Message header checksum.
case [4]byte:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
// Message header command.
case MessageCommand:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
// IP address.
case [16]byte:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case *daghash.Hash:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case *id.ID:
return e.Serialize(w)
case *subnetworkid.SubnetworkID:
_, err := w.Write(e[:])
if err != nil {
return err
}
return nil
case ServiceFlag:
err := binaryserializer.PutUint64(w, littleEndian, uint64(e))
if err != nil {
return err
}
return nil
case KaspaNet:
err := binaryserializer.PutUint32(w, littleEndian, uint32(e))
if err != nil {
return err
}
return nil
}
return errors.Wrapf(errNoEncodingForType, "couldn't find a way to write type %T", element)
}
// writeElements writes multiple items to w. It is equivalent to multiple
// calls to writeElement.
func writeElements(w io.Writer, elements ...interface{}) error {
for _, element := range elements {
err := WriteElement(w, element)
if err != nil {
return err
}
}
return nil
}
// ReadVarInt reads a variable length integer from r and returns it as a uint64.
func ReadVarInt(r io.Reader) (uint64, error) {
discriminant, err := binaryserializer.Uint8(r)
if err != nil {
return 0, err
}
var rv uint64
switch discriminant {
case 0xff:
sv, err := binaryserializer.Uint64(r, littleEndian)
if err != nil {
return 0, err
}
rv = sv
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0x100000000)
if rv < min {
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
case 0xfe:
sv, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return 0, err
}
rv = uint64(sv)
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0x10000)
if rv < min {
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
case 0xfd:
sv, err := binaryserializer.Uint16(r, littleEndian)
if err != nil {
return 0, err
}
rv = uint64(sv)
// The encoding is not canonical if the value could have been
// encoded using fewer bytes.
min := uint64(0xfd)
if rv < min {
return 0, messageError("readVarInt", fmt.Sprintf(
errNonCanonicalVarInt, rv, discriminant, min))
}
default:
rv = uint64(discriminant)
}
return rv, nil
}
// WriteVarInt serializes val to w using a variable number of bytes depending
// on its value.
func WriteVarInt(w io.Writer, val uint64) error {
if val < 0xfd {
_, err := w.Write([]byte{uint8(val)})
return errors.WithStack(err)
}
if val <= math.MaxUint16 {
var buf [3]byte
buf[0] = 0xfd
littleEndian.PutUint16(buf[1:], uint16(val))
_, err := w.Write(buf[:])
return errors.WithStack(err)
}
if val <= math.MaxUint32 {
var buf [5]byte
buf[0] = 0xfe
littleEndian.PutUint32(buf[1:], uint32(val))
_, err := w.Write(buf[:])
return errors.WithStack(err)
}
var buf [9]byte
buf[0] = 0xff
littleEndian.PutUint64(buf[1:], val)
_, err := w.Write(buf[:])
return errors.WithStack(err)
}
// VarIntSerializeSize returns the number of bytes it would take to serialize
// val as a variable length integer.
func VarIntSerializeSize(val uint64) int {
// The value is small enough to be represented by itself, so it's
// just 1 byte.
if val < 0xfd {
return 1
}
// Discriminant 1 byte plus 2 bytes for the uint16.
if val <= math.MaxUint16 {
return 3
}
// Discriminant 1 byte plus 4 bytes for the uint32.
if val <= math.MaxUint32 {
return 5
}
// Discriminant 1 byte plus 8 bytes for the uint64.
return 9
}
// ReadVarString reads a variable length string from r and returns it as a Go
// string. A variable length string is encoded as a variable length integer
// containing the length of the string followed by the bytes that represent the
// string itself. An error is returned if the length is greater than the
// maximum block payload size since it helps protect against memory exhaustion
// attacks and forced panics through malformed messages.
func ReadVarString(r io.Reader, pver uint32) (string, error) {
count, err := ReadVarInt(r)
if err != nil {
return "", err
}
// Prevent variable length strings that are larger than the maximum
// message size. It would be possible to cause memory exhaustion and
// panics without a sane upper bound on this count.
if count > MaxMessagePayload {
str := fmt.Sprintf("variable length string is too long "+
"[count %d, max %d]", count, MaxMessagePayload)
return "", messageError("ReadVarString", str)
}
buf := make([]byte, count)
_, err = io.ReadFull(r, buf)
if err != nil {
return "", err
}
return string(buf), nil
}
// WriteVarString serializes str to w as a variable length integer containing
// the length of the string followed by the bytes that represent the string
// itself.
func WriteVarString(w io.Writer, str string) error {
err := WriteVarInt(w, uint64(len(str)))
if err != nil {
return err
}
_, err = w.Write([]byte(str))
return err
}
// ReadVarBytes reads a variable length byte array. A byte array is encoded
// as a varInt containing the length of the array followed by the bytes
// themselves. An error is returned if the length is greater than the
// passed maxAllowed parameter which helps protect against memory exhaustion
// attacks and forced panics through malformed messages. The fieldName
// parameter is only used for the error message so it provides more context in
// the error.
func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32,
fieldName string) ([]byte, error) {
count, err := ReadVarInt(r)
if err != nil {
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
str := fmt.Sprintf("%s is larger than the max allowed size "+
"[count %d, max %d]", fieldName, count, maxAllowed)
return nil, messageError("ReadVarBytes", str)
}
b := make([]byte, count)
_, err = io.ReadFull(r, b)
if err != nil {
return nil, err
}
return b, nil
}
// WriteVarBytes serializes a variable length byte array to w as a varInt
// containing the number of bytes, followed by the bytes themselves.
func WriteVarBytes(w io.Writer, pver uint32, bytes []byte) error {
slen := uint64(len(bytes))
err := WriteVarInt(w, slen)
if err != nil {
return err
}
_, err = w.Write(bytes)
return err
}
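
To make the varint scheme above concrete, here is a small round-trip sketch written as a Go example in the same package, so it can call the WriteVarInt and ReadVarInt functions shown in this file directly; the serialized sizes follow the discriminant rules documented in VarIntSerializeSize:

```go
package appmessage

import (
	"bytes"
	"fmt"
)

// ExampleWriteVarInt round-trips representative values: values below 0xfd
// take one byte, and the 0xfd/0xfe/0xff discriminants select 2-, 4- and
// 8-byte little-endian payloads.
func ExampleWriteVarInt() {
	for _, value := range []uint64{0xfc, 0xfd, 0x10000, 0x100000000} {
		var buf bytes.Buffer
		if err := WriteVarInt(&buf, value); err != nil {
			panic(err)
		}
		size := buf.Len() // serialized length, captured before the read consumes it
		readBack, err := ReadVarInt(&buf)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%#x -> %d byte(s) -> %#x\n", value, size, readBack)
	}
	// Output:
	// 0xfc -> 1 byte(s) -> 0xfc
	// 0xfd -> 3 byte(s) -> 0xfd
	// 0x10000 -> 5 byte(s) -> 0x10000
	// 0x100000000 -> 9 byte(s) -> 0x100000000
}
```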


@@ -1,44 +1,695 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
import (
"bytes"
"github.com/pkg/errors"
"io"
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// mainnetGenesisHash is the hash of the first block in the block DAG for the
// main network (genesis block).
var mainnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
var mainnetGenesisHash = &daghash.Hash{
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25,
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
})
}
// simnetGenesisHash is the hash of the first block in the block DAG for the
// simulation test network.
var simnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x9d, 0x89, 0xb0, 0x6e, 0xb3, 0x47, 0xb5, 0x6e,
0xcd, 0x6c, 0x63, 0x99, 0x45, 0x91, 0xd5, 0xce,
0x9b, 0x43, 0x05, 0xc1, 0xa5, 0x5e, 0x2a, 0xda,
0x90, 0x4c, 0xf0, 0x6c, 0x4d, 0x5f, 0xd3, 0x62,
})
var simnetGenesisHash = &daghash.Hash{
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a,
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
}
// mainnetGenesisMerkleRoot is the hash of the first transaction in the genesis
// block for the main network.
var mainnetGenesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
var mainnetGenesisMerkleRoot = &daghash.Hash{
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a,
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
})
}
var exampleAcceptedIDMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
var exampleAcceptedIDMerkleRoot = &daghash.Hash{
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
})
}
var exampleUTXOCommitment = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
var exampleUTXOCommitment = &daghash.Hash{
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C,
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
})
}
// TestElementEncoding tests appmessage encode and decode for various element types. This
// is mainly to test the "fast" paths in readElement and writeElement which use
// type assertions to avoid reflection when possible.
func TestElementEncoding(t *testing.T) {
tests := []struct {
in interface{} // Value to encode
buf []byte // Encoded value
}{
{int32(1), []byte{0x01, 0x00, 0x00, 0x00}},
{uint32(256), []byte{0x00, 0x01, 0x00, 0x00}},
{
int64(65536),
[]byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00},
},
{
uint64(4294967296),
[]byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
},
{
true,
[]byte{0x01},
},
{
false,
[]byte{0x00},
},
{
[4]byte{0x01, 0x02, 0x03, 0x04},
[]byte{0x01, 0x02, 0x03, 0x04},
},
{
MessageCommand(0x10),
[]byte{
0x10, 0x00, 0x00, 0x00,
},
},
{
[16]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
},
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
},
},
{
(*daghash.Hash)(&[daghash.HashSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
}),
[]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
},
},
{
ServiceFlag(SFNodeNetwork),
[]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
},
{
KaspaNet(Mainnet),
[]byte{0x1d, 0xf7, 0xdc, 0x3d},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Write to appmessage format.
var buf bytes.Buffer
err := WriteElement(&buf, test.in)
if err != nil {
t.Errorf("writeElement #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("writeElement #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Read from appmessage format.
rbuf := bytes.NewReader(test.buf)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
val = reflect.New(reflect.TypeOf(test.in)).Interface()
}
err = ReadElement(rbuf, val)
if err != nil {
t.Errorf("readElement #%d error %v", i, err)
continue
}
ival := val
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
ival = reflect.Indirect(reflect.ValueOf(val)).Interface()
}
if !reflect.DeepEqual(ival, test.in) {
t.Errorf("readElement #%d\n got: %s want: %s", i,
spew.Sdump(ival), spew.Sdump(test.in))
continue
}
}
}
// TestElementEncodingErrors performs negative tests against appmessage encode and decode
// of various element types to confirm error paths work correctly.
func TestElementEncodingErrors(t *testing.T) {
type writeElementReflect int32
tests := []struct {
in interface{} // Value to encode
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
{int32(1), 0, io.ErrShortWrite, io.EOF},
{uint32(256), 0, io.ErrShortWrite, io.EOF},
{int64(65536), 0, io.ErrShortWrite, io.EOF},
{true, 0, io.ErrShortWrite, io.EOF},
{[4]byte{0x01, 0x02, 0x03, 0x04}, 0, io.ErrShortWrite, io.EOF},
{
MessageCommand(10),
0, io.ErrShortWrite, io.EOF,
},
{
[16]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
},
0, io.ErrShortWrite, io.EOF,
},
{
(*daghash.Hash)(&[daghash.HashSize]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
}),
0, io.ErrShortWrite, io.EOF,
},
{ServiceFlag(SFNodeNetwork), 0, io.ErrShortWrite, io.EOF},
{KaspaNet(Mainnet), 0, io.ErrShortWrite, io.EOF},
// Type with no supported encoding.
{writeElementReflect(0), 0, errNoEncodingForType, errNoEncodingForType},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteElement(w, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("writeElement #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, nil)
val := test.in
if reflect.ValueOf(test.in).Kind() != reflect.Ptr {
val = reflect.New(reflect.TypeOf(test.in)).Interface()
}
err = ReadElement(r, val)
if !errors.Is(err, test.readErr) {
t.Errorf("readElement #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarIntEncoding tests appmessage encode and decode for variable length integers.
func TestVarIntEncoding(t *testing.T) {
tests := []struct {
value uint64 // Value to encode
buf []byte // Encoded value
}{
// Latest protocol version.
// Single byte
{0, []byte{0x00}},
// Max single byte
{0xfc, []byte{0xfc}},
// Min 2-byte
{0xfd, []byte{0xfd, 0x0fd, 0x00}},
// Max 2-byte
{0xffff, []byte{0xfd, 0xff, 0xff}},
// Min 4-byte
{0x10000, []byte{0xfe, 0x00, 0x00, 0x01, 0x00}},
// Max 4-byte
{0xffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff}},
// Min 8-byte
{
0x100000000,
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00},
},
// Max 8-byte
{
0xffffffffffffffff,
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
buf := &bytes.Buffer{}
err := WriteVarInt(buf, test.value)
if err != nil {
t.Errorf("WriteVarInt #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("WriteVarInt #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarInt(rbuf)
if err != nil {
t.Errorf("ReadVarInt #%d error %v", i, err)
continue
}
if val != test.value {
t.Errorf("ReadVarInt #%d\n got: %x want: %x", i,
val, test.value)
continue
}
}
}
// TestVarIntEncodingErrors performs negative tests against appmessage encode and decode
// of variable length integers to confirm error paths work correctly.
func TestVarIntEncodingErrors(t *testing.T) {
tests := []struct {
in uint64 // Value to encode
buf []byte // Encoded value
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force errors on discriminant.
{0, []byte{0x00}, 0, io.ErrShortWrite, io.EOF},
// Force errors on 2-byte read/write.
{0xfd, []byte{0xfd}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
{0xfd, []byte{0xfd}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
// Force errors on 4-byte read/write.
{0x10000, []byte{0xfe}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
{0x10000, []byte{0xfe}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
// Force errors on 8-byte read/write.
{0x100000000, []byte{0xff}, 0, io.ErrShortWrite, io.EOF}, // error on writing length
{0x100000000, []byte{0xff}, 2, io.ErrShortWrite, io.ErrUnexpectedEOF}, // error on writing actual data
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarInt(w, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarInt #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarInt(r)
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarInt #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarIntNonCanonical ensures variable length integers that are not encoded
// canonically return the expected error.
func TestVarIntNonCanonical(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
name string // Test name for easier identification
in []byte // Value to decode
pver uint32 // Protocol version for appmessage encoding
}{
{
"0 encoded with 3 bytes", []byte{0xfd, 0x00, 0x00},
pver,
},
{
"max single-byte value encoded with 3 bytes",
[]byte{0xfd, 0xfc, 0x00}, pver,
},
{
"0 encoded with 5 bytes",
[]byte{0xfe, 0x00, 0x00, 0x00, 0x00}, pver,
},
{
"max three-byte value encoded with 5 bytes",
[]byte{0xfe, 0xff, 0xff, 0x00, 0x00}, pver,
},
{
"0 encoded with 9 bytes",
[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
pver,
},
{
"max five-byte value encoded with 9 bytes",
[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00},
pver,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
rbuf := bytes.NewReader(test.in)
val, err := ReadVarInt(rbuf)
if msgErr := &(MessageError{}); !errors.As(err, &msgErr) {
t.Errorf("ReadVarInt #%d (%s) unexpected error %v", i,
test.name, err)
continue
}
if val != 0 {
t.Errorf("ReadVarInt #%d (%s)\n got: %d want: 0", i,
test.name, val)
continue
}
}
}
// TestVarIntSerializeSize tests the serialize size for variable length integers.
func TestVarIntSerializeSize(t *testing.T) {
tests := []struct {
val uint64 // Value to get the serialized size for
size int // Expected serialized size
}{
// Single byte
{0, 1},
// Max single byte
{0xfc, 1},
// Min 2-byte
{0xfd, 3},
// Max 2-byte
{0xffff, 3},
// Min 4-byte
{0x10000, 5},
// Max 4-byte
{0xffffffff, 5},
// Min 8-byte
{0x100000000, 9},
// Max 8-byte
{0xffffffffffffffff, 9},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := VarIntSerializeSize(test.val)
if serializedSize != test.size {
t.Errorf("VarIntSerializeSize #%d got: %d, want: %d", i,
serializedSize, test.size)
continue
}
}
}
// TestVarStringEncoding tests appmessage encode and decode for variable length strings.
func TestVarStringEncoding(t *testing.T) {
pver := ProtocolVersion
// str256 is a string that takes a 2-byte varint to encode.
str256 := strings.Repeat("test", 64)
tests := []struct {
in string // String to encode
out string // String to decoded value
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty string
{"", "", []byte{0x00}, pver},
// Single byte varint + string
{"Test", "Test", append([]byte{0x04}, []byte("Test")...), pver},
// 2-byte varint + string
{str256, str256, append([]byte{0xfd, 0x00, 0x01}, []byte(str256)...), pver},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarString(&buf, test.in)
if err != nil {
t.Errorf("WriteVarString #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("WriteVarString #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarString(rbuf, test.pver)
if err != nil {
t.Errorf("ReadVarString #%d error %v", i, err)
continue
}
if val != test.out {
t.Errorf("ReadVarString #%d\n got: %s want: %s", i,
val, test.out)
continue
}
}
}
// TestVarStringEncodingErrors performs negative tests against appmessage encode and
// decode of variable length strings to confirm error paths work correctly.
func TestVarStringEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// str256 is a string that takes a 2-byte varint to encode.
str256 := strings.Repeat("test", 64)
tests := []struct {
in string // Value to encode
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Latest protocol version with intentional read/write errors.
// Force errors on empty string.
{"", []byte{0x00}, pver, 0, io.ErrShortWrite, io.EOF},
// Force error on single byte varint + string.
{"Test", []byte{0x04}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
// Force errors on 2-byte varint + string.
{str256, []byte{0xfd}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarString(w, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarString #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarString(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarString #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarStringOverflowErrors performs tests to ensure deserializing variable
// length strings intentionally crafted to use large values for the string
// length are handled properly. This could otherwise potentially be used as an
// attack vector.
func TestVarStringOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
pver, &MessageError{}},
{[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
pver, &MessageError{}},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarString(rbuf, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("ReadVarString #%d wrong error got: %v, "+
"want: %v", i, err, reflect.TypeOf(test.err))
continue
}
}
}
// TestVarBytesEncoding tests appmessage encode and decode for variable length byte array.
func TestVarBytesEncoding(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a byte array that takes a 2-byte varint to encode.
bytes256 := bytes.Repeat([]byte{0x01}, 256)
tests := []struct {
in []byte // Byte Array to write
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
// Empty byte array
{[]byte{}, []byte{0x00}, pver},
// Single byte varint + byte array
{[]byte{0x01}, []byte{0x01, 0x01}, pver},
// 2-byte varint + byte array
{bytes256, append([]byte{0xfd, 0x00, 0x01}, bytes256...), pver},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
var buf bytes.Buffer
err := WriteVarBytes(&buf, test.pver, test.in)
if err != nil {
t.Errorf("WriteVarBytes #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("WriteVarBytes #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
val, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")
if err != nil {
t.Errorf("ReadVarBytes #%d error %v", i, err)
continue
}
		if !bytes.Equal(val, test.in) {
			t.Errorf("ReadVarBytes #%d\n got: %s want: %s", i,
				spew.Sdump(val), spew.Sdump(test.in))
continue
}
}
}
// TestVarBytesEncodingErrors performs negative tests against appmessage encode and
// decode of variable length byte arrays to confirm error paths work correctly.
func TestVarBytesEncodingErrors(t *testing.T) {
pver := ProtocolVersion
// bytes256 is a byte array that takes a 2-byte varint to encode.
bytes256 := bytes.Repeat([]byte{0x01}, 256)
tests := []struct {
in []byte // Byte Array to write
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Latest protocol version with intentional read/write errors.
// Force errors on empty byte array.
{[]byte{}, []byte{0x00}, pver, 0, io.ErrShortWrite, io.EOF},
// Force error on single byte varint + byte array.
{[]byte{0x01, 0x02, 0x03}, []byte{0x04}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
// Force errors on 2-byte varint + byte array.
{bytes256, []byte{0xfd}, pver, 2, io.ErrShortWrite, io.ErrUnexpectedEOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := WriteVarBytes(w, test.pver, test.in)
if !errors.Is(err, test.writeErr) {
t.Errorf("WriteVarBytes #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
r := newFixedReader(test.max, test.buf)
_, err = ReadVarBytes(r, test.pver, MaxMessagePayload,
"test payload")
if !errors.Is(err, test.readErr) {
t.Errorf("ReadVarBytes #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestVarBytesOverflowErrors performs tests to ensure deserializing variable
// length byte arrays intentionally crafted to use large values for the array
// length are handled properly. This could otherwise potentially be used as an
// attack vector.
func TestVarBytesOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
{[]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
pver, &MessageError{}},
{[]byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
pver, &MessageError{}},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
rbuf := bytes.NewReader(test.buf)
_, err := ReadVarBytes(rbuf, test.pver, MaxMessagePayload,
"test payload")
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("ReadVarBytes #%d wrong error got: %v, "+
"want: %v", i, err, reflect.TypeOf(test.err))
continue
}
}
}
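// The overflow vectors above advertise element counts of up to 2^64-1 bytes,
// far beyond MaxMessagePayload; a decoder that allocated a buffer straight from
// the length prefix could be forced into huge allocations. The helper below is
// an illustrative, hypothetical sketch of the guard these tests exercise -- it
// is not the package's actual ReadVarBytes implementation.
func readVarBytesSketch(r io.Reader, maxAllowed uint32, fieldName string) ([]byte, error) {
	count, err := ReadVarInt(r)
	if err != nil {
		return nil, err
	}
	// Reject any advertised length that exceeds the caller-supplied bound
	// before allocating.
	if count > uint64(maxAllowed) {
		return nil, messageError("readVarBytesSketch",
			fieldName+" is larger than the max allowed size")
	}
	buf := make([]byte, count)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}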

View File

@@ -1,305 +0,0 @@
package appmessage
import (
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"github.com/kaspanet/kaspad/util/mstime"
)
// DomainBlockToMsgBlock converts an externalapi.DomainBlock to MsgBlock
func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock {
msgTxs := make([]*MsgTx, 0, len(domainBlock.Transactions))
for _, domainTransaction := range domainBlock.Transactions {
msgTxs = append(msgTxs, DomainTransactionToMsgTx(domainTransaction))
}
return &MsgBlock{
Header: *DomainBlockHeaderToBlockHeader(domainBlock.Header),
Transactions: msgTxs,
}
}
// DomainBlockHeaderToBlockHeader converts an externalapi.BlockHeader to MsgBlockHeader
func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader {
return &MsgBlockHeader{
Version: domainBlockHeader.Version(),
ParentHashes: domainBlockHeader.ParentHashes(),
HashMerkleRoot: domainBlockHeader.HashMerkleRoot(),
AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(),
UTXOCommitment: domainBlockHeader.UTXOCommitment(),
Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()),
Bits: domainBlockHeader.Bits(),
Nonce: domainBlockHeader.Nonce(),
}
}
// MsgBlockToDomainBlock converts a MsgBlock to externalapi.DomainBlock
func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock {
transactions := make([]*externalapi.DomainTransaction, 0, len(msgBlock.Transactions))
for _, msgTx := range msgBlock.Transactions {
transactions = append(transactions, MsgTxToDomainTransaction(msgTx))
}
return &externalapi.DomainBlock{
Header: BlockHeaderToDomainBlockHeader(&msgBlock.Header),
Transactions: transactions,
}
}
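// Illustrative usage sketch (the helper name is hypothetical, not part of this
// file): the two converters above are meant to pair up, so a domain block can
// be turned into its appmessage representation for the wire and converted back
// on the other side.
func exampleBlockRoundTrip(domainBlock *externalapi.DomainBlock) *externalapi.DomainBlock {
	msgBlock := DomainBlockToMsgBlock(domainBlock) // domain -> wire message
	return MsgBlockToDomainBlock(msgBlock)         // wire message -> domain
}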
// BlockHeaderToDomainBlockHeader converts a MsgBlockHeader to externalapi.BlockHeader
func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader {
return blockheader.NewImmutableBlockHeader(
blockHeader.Version,
blockHeader.ParentHashes,
blockHeader.HashMerkleRoot,
blockHeader.AcceptedIDMerkleRoot,
blockHeader.UTXOCommitment,
blockHeader.Timestamp.UnixMilliseconds(),
blockHeader.Bits,
blockHeader.Nonce,
)
}
// DomainTransactionToMsgTx converts an externalapi.DomainTransaction into an MsgTx
func DomainTransactionToMsgTx(domainTransaction *externalapi.DomainTransaction) *MsgTx {
txIns := make([]*TxIn, 0, len(domainTransaction.Inputs))
for _, input := range domainTransaction.Inputs {
txIns = append(txIns, domainTransactionInputToTxIn(input))
}
txOuts := make([]*TxOut, 0, len(domainTransaction.Outputs))
for _, output := range domainTransaction.Outputs {
txOuts = append(txOuts, domainTransactionOutputToTxOut(output))
}
return &MsgTx{
Version: domainTransaction.Version,
TxIn: txIns,
TxOut: txOuts,
LockTime: domainTransaction.LockTime,
SubnetworkID: domainTransaction.SubnetworkID,
Gas: domainTransaction.Gas,
PayloadHash: domainTransaction.PayloadHash,
Payload: domainTransaction.Payload,
}
}
func domainTransactionOutputToTxOut(domainTransactionOutput *externalapi.DomainTransactionOutput) *TxOut {
return &TxOut{
Value: domainTransactionOutput.Value,
ScriptPubKey: domainTransactionOutput.ScriptPublicKey,
}
}
func domainTransactionInputToTxIn(domainTransactionInput *externalapi.DomainTransactionInput) *TxIn {
return &TxIn{
PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint),
SignatureScript: domainTransactionInput.SignatureScript,
Sequence: domainTransactionInput.Sequence,
}
}
func domainOutpointToOutpoint(domainOutpoint externalapi.DomainOutpoint) *Outpoint {
return NewOutpoint(
&domainOutpoint.TransactionID,
domainOutpoint.Index)
}
// MsgTxToDomainTransaction converts an MsgTx into externalapi.DomainTransaction
func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
transactionInputs := make([]*externalapi.DomainTransactionInput, 0, len(msgTx.TxIn))
for _, txIn := range msgTx.TxIn {
transactionInputs = append(transactionInputs, txInToDomainTransactionInput(txIn))
}
transactionOutputs := make([]*externalapi.DomainTransactionOutput, 0, len(msgTx.TxOut))
for _, txOut := range msgTx.TxOut {
transactionOutputs = append(transactionOutputs, txOutToDomainTransactionOutput(txOut))
}
payload := make([]byte, 0)
if msgTx.Payload != nil {
payload = msgTx.Payload
}
return &externalapi.DomainTransaction{
Version: msgTx.Version,
Inputs: transactionInputs,
Outputs: transactionOutputs,
LockTime: msgTx.LockTime,
SubnetworkID: msgTx.SubnetworkID,
Gas: msgTx.Gas,
PayloadHash: msgTx.PayloadHash,
Payload: payload,
}
}
func txOutToDomainTransactionOutput(txOut *TxOut) *externalapi.DomainTransactionOutput {
return &externalapi.DomainTransactionOutput{
Value: txOut.Value,
ScriptPublicKey: txOut.ScriptPubKey,
}
}
func txInToDomainTransactionInput(txIn *TxIn) *externalapi.DomainTransactionInput {
return &externalapi.DomainTransactionInput{
PreviousOutpoint: *outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO
SignatureScript: txIn.SignatureScript,
Sequence: txIn.Sequence,
}
}
func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
return &externalapi.DomainOutpoint{
TransactionID: outpoint.TxID,
Index: outpoint.Index,
}
}
// RPCTransactionToDomainTransaction converts RPCTransactions to DomainTransactions
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
for i, input := range rpcTransaction.Inputs {
transactionID, err := transactionid.FromString(input.PreviousOutpoint.TransactionID)
if err != nil {
return nil, err
}
previousOutpoint := &externalapi.DomainOutpoint{
TransactionID: *transactionID,
Index: input.PreviousOutpoint.Index,
}
signatureScript, err := hex.DecodeString(input.SignatureScript)
if err != nil {
return nil, err
}
inputs[i] = &externalapi.DomainTransactionInput{
PreviousOutpoint: *previousOutpoint,
SignatureScript: signatureScript,
Sequence: input.Sequence,
}
}
outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs))
for i, output := range rpcTransaction.Outputs {
scriptPublicKey, err := hex.DecodeString(output.ScriptPublicKey.Script)
if err != nil {
return nil, err
}
outputs[i] = &externalapi.DomainTransactionOutput{
Value: output.Amount,
ScriptPublicKey: &externalapi.ScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
}
}
subnetworkID, err := subnetworks.FromString(rpcTransaction.SubnetworkID)
if err != nil {
return nil, err
}
payloadHash, err := externalapi.NewDomainHashFromString(rpcTransaction.PayloadHash)
if err != nil {
return nil, err
}
payload, err := hex.DecodeString(rpcTransaction.Payload)
if err != nil {
return nil, err
}
return &externalapi.DomainTransaction{
Version: rpcTransaction.Version,
Inputs: inputs,
Outputs: outputs,
LockTime: rpcTransaction.LockTime,
SubnetworkID: *subnetworkID,
		Gas:          rpcTransaction.Gas,
PayloadHash: *payloadHash,
Payload: payload,
}, nil
}
// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions
func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction {
inputs := make([]*RPCTransactionInput, len(transaction.Inputs))
for i, input := range transaction.Inputs {
transactionID := input.PreviousOutpoint.TransactionID.String()
previousOutpoint := &RPCOutpoint{
TransactionID: transactionID,
Index: input.PreviousOutpoint.Index,
}
signatureScript := hex.EncodeToString(input.SignatureScript)
inputs[i] = &RPCTransactionInput{
PreviousOutpoint: previousOutpoint,
SignatureScript: signatureScript,
Sequence: input.Sequence,
}
}
outputs := make([]*RPCTransactionOutput, len(transaction.Outputs))
for i, output := range transaction.Outputs {
scriptPublicKey := hex.EncodeToString(output.ScriptPublicKey.Script)
outputs[i] = &RPCTransactionOutput{
Amount: output.Value,
ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
}
}
subnetworkID := transaction.SubnetworkID.String()
payloadHash := transaction.PayloadHash.String()
payload := hex.EncodeToString(transaction.Payload)
return &RPCTransaction{
Version: transaction.Version,
Inputs: inputs,
Outputs: outputs,
LockTime: transaction.LockTime,
SubnetworkID: subnetworkID,
		Gas:          transaction.Gas,
PayloadHash: payloadHash,
Payload: payload,
}
}
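// Illustrative round-trip sketch (hypothetical helper, not part of this file):
// a DomainTransaction converted to its RPC form and back should be preserved,
// modulo the hex re-encoding of scripts and payloads performed above.
func exampleTransactionRPCRoundTrip(tx *externalapi.DomainTransaction) (*externalapi.DomainTransaction, error) {
	rpcTransaction := DomainTransactionToRPCTransaction(tx)
	return RPCTransactionToDomainTransaction(rpcTransaction)
}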
// OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs converts
// OutpointAndUTXOEntryPairs to domain OutpointAndUTXOEntryPairs
func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) []*externalapi.OutpointAndUTXOEntryPair {
domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
domainOutpointAndUTXOEntryPairs[i] = &externalapi.OutpointAndUTXOEntryPair{
Outpoint: &externalapi.DomainOutpoint{
TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID,
Index: outpointAndUTXOEntryPair.Outpoint.Index,
},
UTXOEntry: utxo.NewUTXOEntry(
outpointAndUTXOEntryPair.UTXOEntry.Amount,
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore,
),
}
}
return domainOutpointAndUTXOEntryPairs
}
// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts
// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs
func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) []*OutpointAndUTXOEntryPair {
domainOutpointAndUTXOEntryPairs := make([]*OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs))
for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
domainOutpointAndUTXOEntryPairs[i] = &OutpointAndUTXOEntryPair{
Outpoint: &Outpoint{
TxID: outpointAndUTXOEntryPair.Outpoint.TransactionID,
Index: outpointAndUTXOEntryPair.Outpoint.Index,
},
UTXOEntry: &UTXOEntry{
Amount: outpointAndUTXOEntryPair.UTXOEntry.Amount(),
ScriptPublicKey: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey(),
IsCoinbase: outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase(),
BlockBlueScore: outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore(),
},
}
}
return domainOutpointAndUTXOEntryPairs
}

View File

@@ -34,35 +34,24 @@ const (
CmdVerAck
CmdRequestAddresses
CmdAddresses
CmdRequestHeaders
CmdRequestIBDBlocks
CmdBlock
CmdTx
CmdPing
CmdPong
CmdRequestBlockLocator
CmdBlockLocator
CmdSelectedTip
CmdRequestSelectedTip
CmdInvRelayBlock
CmdRequestRelayBlocks
CmdInvTransaction
CmdRequestTransactions
CmdIBDBlock
CmdDoneHeaders
CmdRequestNextIBDBlocks
CmdDoneIBDBlocks
CmdTransactionNotFound
CmdReject
CmdHeader
CmdRequestNextHeaders
CmdRequestPruningPointUTXOSetAndBlock
CmdPruningPointUTXOSetChunk
CmdRequestIBDBlocks
CmdUnexpectedPruningPoint
CmdRequestPruningPointHash
CmdPruningPointHash
CmdIBDBlockLocator
CmdIBDBlockLocatorHighestHash
CmdIBDBlockLocatorHighestHashNotFound
CmdBlockHeaders
CmdRequestNextPruningPointUTXOSetChunk
CmdDonePruningPointUTXOSetChunks
// rpc
CmdGetCurrentNetworkRequestMessage
@@ -87,15 +76,15 @@ const (
CmdAddPeerResponseMessage
CmdSubmitTransactionRequestMessage
CmdSubmitTransactionResponseMessage
CmdNotifyVirtualSelectedParentChainChangedRequestMessage
CmdNotifyVirtualSelectedParentChainChangedResponseMessage
CmdVirtualSelectedParentChainChangedNotificationMessage
CmdNotifyChainChangedRequestMessage
CmdNotifyChainChangedResponseMessage
CmdChainChangedNotificationMessage
CmdGetBlockRequestMessage
CmdGetBlockResponseMessage
CmdGetSubnetworkRequestMessage
CmdGetSubnetworkResponseMessage
CmdGetVirtualSelectedParentChainFromBlockRequestMessage
CmdGetVirtualSelectedParentChainFromBlockResponseMessage
CmdGetChainFromBlockRequestMessage
CmdGetChainFromBlockResponseMessage
CmdGetBlocksRequestMessage
CmdGetBlocksResponseMessage
CmdGetBlockCountRequestMessage
@@ -114,140 +103,83 @@ const (
CmdShutDownResponseMessage
CmdGetHeadersRequestMessage
CmdGetHeadersResponseMessage
CmdNotifyUTXOsChangedRequestMessage
CmdNotifyUTXOsChangedResponseMessage
CmdUTXOsChangedNotificationMessage
CmdStopNotifyingUTXOsChangedRequestMessage
CmdStopNotifyingUTXOsChangedResponseMessage
CmdGetUTXOsByAddressesRequestMessage
CmdGetUTXOsByAddressesResponseMessage
CmdGetVirtualSelectedParentBlueScoreRequestMessage
CmdGetVirtualSelectedParentBlueScoreResponseMessage
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
CmdBanRequestMessage
CmdBanResponseMessage
CmdUnbanRequestMessage
CmdUnbanResponseMessage
CmdGetInfoRequestMessage
CmdGetInfoResponseMessage
CmdNotifyPruningPointUTXOSetOverrideRequestMessage
CmdNotifyPruningPointUTXOSetOverrideResponseMessage
CmdPruningPointUTXOSetOverrideNotificationMessage
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage
)
// ProtocolMessageCommandToString maps all MessageCommands to their string representation
var ProtocolMessageCommandToString = map[MessageCommand]string{
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestHeaders: "RequestHeaders",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdDoneHeaders: "DoneHeaders",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
CmdHeader: "Header",
CmdRequestNextHeaders: "RequestNextHeaders",
CmdRequestPruningPointUTXOSetAndBlock: "RequestPruningPointUTXOSetAndBlock",
CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk",
CmdRequestIBDBlocks: "RequestIBDBlocks",
CmdUnexpectedPruningPoint: "UnexpectedPruningPoint",
	CmdRequestPruningPointHash:             "RequestPruningPointHash",
CmdPruningPointHash: "PruningPointHash",
CmdIBDBlockLocator: "IBDBlockLocator",
CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash",
CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound",
CmdBlockHeaders: "BlockHeaders",
CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk",
CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks",
CmdVersion: "Version",
CmdVerAck: "VerAck",
CmdRequestAddresses: "RequestAddresses",
CmdAddresses: "Addresses",
CmdRequestIBDBlocks: "RequestBlocks",
CmdBlock: "Block",
CmdTx: "Tx",
CmdPing: "Ping",
CmdPong: "Pong",
CmdRequestBlockLocator: "RequestBlockLocator",
CmdBlockLocator: "BlockLocator",
CmdSelectedTip: "SelectedTip",
CmdRequestSelectedTip: "RequestSelectedTip",
CmdInvRelayBlock: "InvRelayBlock",
CmdRequestRelayBlocks: "RequestRelayBlocks",
CmdInvTransaction: "InvTransaction",
CmdRequestTransactions: "RequestTransactions",
CmdIBDBlock: "IBDBlock",
CmdRequestNextIBDBlocks: "RequestNextIBDBlocks",
CmdDoneIBDBlocks: "DoneIBDBlocks",
CmdTransactionNotFound: "TransactionNotFound",
CmdReject: "Reject",
}
// RPCMessageCommandToString maps all MessageCommands to their string representation
var RPCMessageCommandToString = map[MessageCommand]string{
CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest",
CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse",
CmdSubmitBlockRequestMessage: "SubmitBlockRequest",
CmdSubmitBlockResponseMessage: "SubmitBlockResponse",
CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest",
CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse",
CmdGetBlockTemplateTransactionMessage: "CmdGetBlockTemplateTransaction",
CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest",
CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse",
CmdBlockAddedNotificationMessage: "BlockAddedNotification",
CmdGetPeerAddressesRequestMessage: "GetPeerAddressesRequest",
CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse",
CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest",
CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse",
CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest",
CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse",
CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest",
CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse",
CmdAddPeerRequestMessage: "AddPeerRequest",
CmdAddPeerResponseMessage: "AddPeerResponse",
CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest",
CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse",
CmdNotifyVirtualSelectedParentChainChangedRequestMessage: "NotifyVirtualSelectedParentChainChangedRequest",
CmdNotifyVirtualSelectedParentChainChangedResponseMessage: "NotifyVirtualSelectedParentChainChangedResponse",
CmdVirtualSelectedParentChainChangedNotificationMessage: "VirtualSelectedParentChainChangedNotification",
CmdGetBlockRequestMessage: "GetBlockRequest",
CmdGetBlockResponseMessage: "GetBlockResponse",
CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest",
CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse",
CmdGetVirtualSelectedParentChainFromBlockRequestMessage: "GetVirtualSelectedParentChainFromBlockRequest",
CmdGetVirtualSelectedParentChainFromBlockResponseMessage: "GetVirtualSelectedParentChainFromBlockResponse",
CmdGetBlocksRequestMessage: "GetBlocksRequest",
CmdGetBlocksResponseMessage: "GetBlocksResponse",
CmdGetBlockCountRequestMessage: "GetBlockCountRequest",
CmdGetBlockCountResponseMessage: "GetBlockCountResponse",
CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest",
CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse",
CmdResolveFinalityConflictRequestMessage: "ResolveFinalityConflictRequest",
CmdResolveFinalityConflictResponseMessage: "ResolveFinalityConflictResponse",
CmdNotifyFinalityConflictsRequestMessage: "NotifyFinalityConflictsRequest",
CmdNotifyFinalityConflictsResponseMessage: "NotifyFinalityConflictsResponse",
CmdFinalityConflictNotificationMessage: "FinalityConflictNotification",
CmdFinalityConflictResolvedNotificationMessage: "FinalityConflictResolvedNotification",
CmdGetMempoolEntriesRequestMessage: "GetMempoolEntriesRequest",
CmdGetMempoolEntriesResponseMessage: "GetMempoolEntriesResponse",
CmdGetHeadersRequestMessage: "GetHeadersRequest",
CmdGetHeadersResponseMessage: "GetHeadersResponse",
CmdNotifyUTXOsChangedRequestMessage: "NotifyUTXOsChangedRequest",
CmdNotifyUTXOsChangedResponseMessage: "NotifyUTXOsChangedResponse",
CmdUTXOsChangedNotificationMessage: "UTXOsChangedNotification",
CmdStopNotifyingUTXOsChangedRequestMessage: "StopNotifyingUTXOsChangedRequest",
CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse",
CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest",
CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse",
CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest",
CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse",
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest",
CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage: "NotifyVirtualSelectedParentBlueScoreChangedResponse",
CmdVirtualSelectedParentBlueScoreChangedNotificationMessage: "VirtualSelectedParentBlueScoreChangedNotification",
CmdBanRequestMessage: "BanRequest",
CmdBanResponseMessage: "BanResponse",
CmdUnbanRequestMessage: "UnbanRequest",
CmdUnbanResponseMessage: "UnbanResponse",
CmdGetInfoRequestMessage: "GetInfoRequest",
	CmdGetInfoResponseMessage: "GetInfoResponse",
CmdNotifyPruningPointUTXOSetOverrideRequestMessage: "NotifyPruningPointUTXOSetOverrideRequest",
CmdNotifyPruningPointUTXOSetOverrideResponseMessage: "NotifyPruningPointUTXOSetOverrideResponse",
CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification",
CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest",
CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse",
CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest",
CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse",
CmdSubmitBlockRequestMessage: "SubmitBlockRequest",
CmdSubmitBlockResponseMessage: "SubmitBlockResponse",
CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest",
CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse",
CmdGetBlockTemplateTransactionMessage: "CmdGetBlockTemplateTransaction",
CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest",
CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse",
CmdBlockAddedNotificationMessage: "BlockAddedNotification",
CmdGetPeerAddressesRequestMessage: "GetPeerAddressesRequest",
CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse",
CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest",
CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse",
CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest",
CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse",
CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest",
CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse",
CmdAddPeerRequestMessage: "AddPeerRequest",
CmdAddPeerResponseMessage: "AddPeerResponse",
CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest",
CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse",
CmdNotifyChainChangedRequestMessage: "NotifyChainChangedRequest",
CmdNotifyChainChangedResponseMessage: "NotifyChainChangedResponse",
CmdChainChangedNotificationMessage: "ChainChangedNotification",
CmdGetBlockRequestMessage: "GetBlockRequest",
CmdGetBlockResponseMessage: "GetBlockResponse",
CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest",
CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse",
CmdGetChainFromBlockRequestMessage: "GetChainFromBlockRequest",
CmdGetChainFromBlockResponseMessage: "GetChainFromBlockResponse",
CmdGetBlocksRequestMessage: "GetBlocksRequest",
CmdGetBlocksResponseMessage: "GetBlocksResponse",
CmdGetBlockCountRequestMessage: "GetBlockCountRequest",
CmdGetBlockCountResponseMessage: "GetBlockCountResponse",
CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest",
CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse",
CmdResolveFinalityConflictRequestMessage: "ResolveFinalityConflictRequest",
CmdResolveFinalityConflictResponseMessage: "ResolveFinalityConflictResponse",
CmdNotifyFinalityConflictsRequestMessage: "NotifyFinalityConflictsRequest",
CmdNotifyFinalityConflictsResponseMessage: "NotifyFinalityConflictsResponse",
CmdFinalityConflictNotificationMessage: "FinalityConflictNotification",
CmdFinalityConflictResolvedNotificationMessage: "FinalityConflictResolvedNotification",
CmdGetMempoolEntriesRequestMessage: "GetMempoolEntriesRequestMessage",
CmdGetMempoolEntriesResponseMessage: "GetMempoolEntriesResponseMessage",
CmdGetHeadersRequestMessage: "GetHeadersRequest",
CmdGetHeadersResponseMessage: "GetHeadersResponse",
}
// Message is an interface that describes a kaspa message. A type that

View File

@@ -0,0 +1,195 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"io"
"math"
)
// BaseBlockHeaderPayload is the base number of bytes a block header occupies,
// not including the list of parent block hashes:
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// NumParentBlocks 1 byte + HashMerkleRoot hash +
// AcceptedIDMerkleRoot hash + UTXOCommitment hash.
// To get the total size of a block header, add len(ParentHashes) * daghash.HashSize
// to this value.
const BaseBlockHeaderPayload = 25 + 3*(daghash.HashSize)
// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255, the largest value NumParentBlocks can hold since it is encoded as a single byte.
const MaxNumParentBlocks = 255
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * daghash.HashSize)
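// Assuming daghash.HashSize is 32 bytes (which the header serialize-size tests
// in this change imply: 121 bytes for a header with no parents and 185 bytes
// for a header with two parents), these constants work out to
// BaseBlockHeaderPayload = 25 + 3*32 = 121 bytes and
// MaxBlockHeaderPayload = 121 + 255*32 = 8281 bytes.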
// BlockHeader defines information about a block and is used in the kaspa
// block (MsgBlock) and headers (MsgHeader) messages.
type BlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version int32
// Hashes of the parent block headers in the blockDAG.
ParentHashes []*daghash.Hash
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot *daghash.Hash
	// AcceptedIDMerkleRoot is a merkle tree reference to the hash of all
	// transactions accepted from the block's blue set (block.Blues).
AcceptedIDMerkleRoot *daghash.Hash
// UTXOCommitment is an ECMH UTXO commitment to the block UTXO.
UTXOCommitment *daghash.Hash
// Time the block was created.
Timestamp mstime.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint64
}
// NumParentBlocks returns the number of entries in ParentHashes.
func (h *BlockHeader) NumParentBlocks() byte {
numParents := len(h.ParentHashes)
if numParents > math.MaxUint8 {
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
}
return byte(numParents)
}
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() *daghash.Hash {
	// Encode the header into a double-hash writer to produce the block hash.
writer := daghash.NewDoubleHashWriter()
err := writeBlockHeader(writer, 0, h)
if err != nil {
		// This can only happen if the writer returns an error, and this writer
		// should never return one (it performs no allocations and has no failure
		// paths). The only non-writer error path here is an unknown type passed
		// to `WriteElement`.
panic(fmt.Sprintf("BlockHash() failed. this should never fail unless BlockHeader was changed. err: %+v", err))
}
res := writer.Finalize()
return &res
}
// IsGenesis returns true iff this block is a genesis block
func (h *BlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding block headers stored to disk, such as in a
// database, as opposed to decoding block headers from the appmessage.
func (h *BlockHeader) KaspaDecode(r io.Reader, pver uint32) error {
return readBlockHeader(r, pver, h)
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding block headers to be stored to disk, such as in a
// database, as opposed to encoding block headers for the appmessage.
func (h *BlockHeader) KaspaEncode(w io.Writer, pver uint32) error {
return writeBlockHeader(w, pver, h)
}
// Deserialize decodes a block header from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of readBlockHeader.
return readBlockHeader(r, 0, h)
}
// Serialize encodes the receiver into w using a format that is suitable for
// long-term storage such as a database while respecting the Version field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
}
// SerializeSize returns the number of bytes it would take to serialize the
// block header.
func (h *BlockHeader) SerializeSize() int {
return BaseBlockHeaderPayload + int(h.NumParentBlocks())*daghash.HashSize
}
// NewBlockHeader returns a new BlockHeader using the provided version, parent
// block hashes, hash merkle root, accepted ID merkle root, UTXO commitment,
// difficulty bits, and nonce used to generate the block, with defaults or
// calculated values for the remaining fields.
func NewBlockHeader(version int32, parentHashes []*daghash.Hash, hashMerkleRoot *daghash.Hash,
acceptedIDMerkleRoot *daghash.Hash, utxoCommitment *daghash.Hash, bits uint32, nonce uint64) *BlockHeader {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
}
}
// readBlockHeader reads a kaspa block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the appmessage.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error {
var numParentBlocks byte
err := readElements(r, &bh.Version, &numParentBlocks)
if err != nil {
return err
}
bh.ParentHashes = make([]*daghash.Hash, numParentBlocks)
for i := byte(0); i < numParentBlocks; i++ {
hash := &daghash.Hash{}
err := ReadElement(r, hash)
if err != nil {
return err
}
bh.ParentHashes[i] = hash
}
bh.HashMerkleRoot = &daghash.Hash{}
bh.AcceptedIDMerkleRoot = &daghash.Hash{}
bh.UTXOCommitment = &daghash.Hash{}
return readElements(r, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment,
(*int64Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
// writeBlockHeader writes a kaspa block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the appmessage.
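// As encoded here, the resulting layout is:
// Version (4 bytes) | NumParentBlocks (1 byte) |
// ParentHashes (daghash.HashSize bytes each) | HashMerkleRoot |
// AcceptedIDMerkleRoot | UTXOCommitment (one hash each) |
// Timestamp in milliseconds (8 bytes) | Bits (4 bytes) | Nonce (8 bytes).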
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
timestamp := bh.Timestamp.UnixMilliseconds()
if err := writeElements(w, bh.Version, bh.NumParentBlocks()); err != nil {
return err
}
for _, hash := range bh.ParentHashes {
if err := WriteElement(w, hash); err != nil {
return err
}
}
return writeElements(w, bh.HashMerkleRoot, bh.AcceptedIDMerkleRoot, bh.UTXOCommitment, timestamp, bh.Bits, bh.Nonce)
}

View File

@@ -0,0 +1,345 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"bytes"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
"reflect"
"testing"
)
// TestBlockHeader tests the BlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
hashes := []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash}
merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if !bh.HashMerkleRoot.IsEqual(merkleHash) {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
spew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))
}
if bh.Bits != bits {
t.Errorf("NewBlockHeader: wrong bits - got %v, want %v",
bh.Bits, bits)
}
if bh.Nonce != nonce {
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
bh.Nonce, nonce)
}
}
// TestBlockHeaderEncoding tests the BlockHeader appmessage encode and decode for various
// protocol versions.
func TestBlockHeaderEncoding(t *testing.T) {
nonce := uint64(123123) // 0x000000000001e0f3
pver := ProtocolVersion
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
in *BlockHeader // Data to encode
out *BlockHeader // Expected decoded data
buf []byte // Encoded data
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
baseBlockHdr,
baseBlockHdr,
baseBlockHdrEncoded,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
var buf bytes.Buffer
err := writeBlockHeader(&buf, test.pver, test.in)
if err != nil {
t.Errorf("writeBlockHeader #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("writeBlockHeader #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
buf.Reset()
err = test.in.KaspaEncode(&buf, pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the block header from appmessage format.
var bh BlockHeader
rbuf := bytes.NewReader(test.buf)
err = readBlockHeader(rbuf, test.pver, &bh)
if err != nil {
t.Errorf("readBlockHeader #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("readBlockHeader #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
rbuf = bytes.NewReader(test.buf)
err = bh.KaspaDecode(rbuf, pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
}
}
// TestBlockHeaderSerialize tests BlockHeader serialize and deserialize.
func TestBlockHeaderSerialize(t *testing.T) {
nonce := uint64(123123) // 0x01e0f3
// baseBlockHdr is used in the various tests as a baseline BlockHeader.
bits := uint32(0x1d00ffff)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
Timestamp: mstime.UnixMilliseconds(0x17315ed0f99),
Bits: bits,
Nonce: nonce,
}
// baseBlockHdrEncoded is the appmessage encoded bytes of baseBlockHdr.
baseBlockHdrEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1,
0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot
0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f,
0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a,
0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0xf3, 0xe0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
}
tests := []struct {
in *BlockHeader // Data to encode
out *BlockHeader // Expected decoded data
buf []byte // Serialized data
}{
{
baseBlockHdr,
baseBlockHdr,
baseBlockHdrEncoded,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the block header.
var buf bytes.Buffer
err := test.in.Serialize(&buf)
if err != nil {
t.Errorf("Serialize #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("Serialize #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Deserialize the block header.
var bh BlockHeader
rbuf := bytes.NewReader(test.buf)
err = bh.Deserialize(rbuf)
if err != nil {
t.Errorf("Deserialize #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&bh, test.out) {
t.Errorf("Deserialize #%d\n got: %s want: %s", i,
spew.Sdump(&bh), spew.Sdump(test.out))
continue
}
}
}
// TestBlockHeaderSerializeSize performs tests to ensure the serialize size for
// various block headers is accurate.
func TestBlockHeaderSerializeSize(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: &daghash.ZeroHash,
UTXOCommitment: &daghash.ZeroHash,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *BlockHeader // Block header to encode
size int // Expected serialized size
}{
		// Header with no parent hashes (genesis).
		{genesisBlockHdr, 121},
		// Header with two parent hashes.
		{baseBlockHdr, 185},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := test.in.SerializeSize()
if serializedSize != test.size {
t.Errorf("BlockHeader.SerializeSize: #%d got: %d, want: "+
"%d", i, serializedSize, test.size)
continue
}
}
}
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *BlockHeader // Block header to encode
isGenesis bool // Expected result for call of .IsGenesis
}{
{genesisBlockHdr, true},
{baseBlockHdr, false},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
isGenesis := test.in.IsGenesis()
if isGenesis != test.isGenesis {
t.Errorf("BlockHeader.IsGenesis: #%d got: %t, want: %t",
i, isGenesis, test.isGenesis)
}
}
}

View File

@@ -1,19 +0,0 @@
package appmessage
// BlockHeadersMessage represents a kaspa BlockHeaders message
type BlockHeadersMessage struct {
baseMessage
BlockHeaders []*MsgBlockHeader
}
// Command returns the protocol command string for the message
func (msg *BlockHeadersMessage) Command() MessageCommand {
return CmdBlockHeaders
}
// NewBlockHeadersMessage returns a new kaspa BlockHeaders message
func NewBlockHeadersMessage(blockHeaders []*MsgBlockHeader) *BlockHeadersMessage {
return &BlockHeadersMessage{
BlockHeaders: blockHeaders,
}
}

View File

@@ -4,15 +4,58 @@
package appmessage
import (
"fmt"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxAddressesPerMsg is the maximum number of addresses that can be in a single
// kaspa Addresses message (MsgAddresses).
const MaxAddressesPerMsg = 1000
// MsgAddresses implements the Message interface and represents a kaspa
// Addresses message.
// Addresses message. It is used to provide a list of known active peers on the
// network. An active peer is considered one that has transmitted a message
// within the last 3 hours. Nodes which have not transmitted in that time
// frame should be forgotten. Each message is limited to a maximum number of
// addresses, which is currently 1000. As a result, multiple messages must
// be used to relay the full list.
//
// Use the AddAddress function to build up the list of known addresses when
// sending an Addresses message to another peer.
type MsgAddresses struct {
baseMessage
AddressList []*NetAddress
IncludeAllSubnetworks bool
SubnetworkID *subnetworkid.SubnetworkID
AddrList []*NetAddress
}
// AddAddress adds a known active peer to the message.
func (msg *MsgAddresses) AddAddress(na *NetAddress) error {
if len(msg.AddrList)+1 > MaxAddressesPerMsg {
str := fmt.Sprintf("too many addresses in message [max %d]",
MaxAddressesPerMsg)
return messageError("MsgAddresses.AddAddress", str)
}
msg.AddrList = append(msg.AddrList, na)
return nil
}
// AddAddresses adds multiple known active peers to the message.
func (msg *MsgAddresses) AddAddresses(netAddrs ...*NetAddress) error {
for _, na := range netAddrs {
err := msg.AddAddress(na)
if err != nil {
return err
}
}
return nil
}
// ClearAddresses removes all addresses from the message.
func (msg *MsgAddresses) ClearAddresses() {
msg.AddrList = []*NetAddress{}
}
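// Illustrative usage sketch (hypothetical helper, not part of this file):
// building an Addresses message before relaying known peers, as described in
// the MsgAddresses documentation above.
func exampleBuildAddressesMessage(peers []*NetAddress) (*MsgAddresses, error) {
	msg := NewMsgAddresses(true, nil) // include all subnetworks
	if err := msg.AddAddresses(peers...); err != nil {
		// More than MaxAddressesPerMsg peers were supplied; the caller would
		// need to split them across multiple messages.
		return nil, err
	}
	return msg, nil
}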
// Command returns the protocol command string for the message. This is part
@@ -23,8 +66,10 @@ func (msg *MsgAddresses) Command() MessageCommand {
// NewMsgAddresses returns a new kaspa Addresses message that conforms to the
// Message interface. See MsgAddresses for details.
func NewMsgAddresses(addressList []*NetAddress) *MsgAddresses {
func NewMsgAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgAddresses {
return &MsgAddresses{
AddressList: addressList,
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,
AddrList: make([]*NetAddress, 0, MaxAddressesPerMsg),
}
}

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"net"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestAddresses tests the MsgAddresses API.
func TestAddresses(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(3)
msg := NewMsgAddresses(false, nil)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgAddresses: wrong command - got %v want %v",
cmd, wantCmd)
}
// Ensure NetAddresses are added properly.
tcpAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
na := NewNetAddress(tcpAddr, SFNodeNetwork)
err := msg.AddAddress(na)
if err != nil {
t.Errorf("AddAddress: %v", err)
}
if msg.AddrList[0] != na {
t.Errorf("AddAddress: wrong address added - got %v, want %v",
spew.Sprint(msg.AddrList[0]), spew.Sprint(na))
}
// Ensure the address list is cleared properly.
msg.ClearAddresses()
if len(msg.AddrList) != 0 {
t.Errorf("ClearAddresses: address list is not empty - "+
"got %v [%v], want %v", len(msg.AddrList),
spew.Sprint(msg.AddrList[0]), 0)
}
// Ensure adding more than the max allowed addresses per message returns
// error.
for i := 0; i < MaxAddressesPerMsg+1; i++ {
err = msg.AddAddress(na)
}
if err == nil {
t.Errorf("AddAddress: expected error on too many addresses " +
"not received")
}
err = msg.AddAddresses(na)
if err == nil {
t.Errorf("AddAddresses: expected error on too many addresses " +
"not received")
}
}

View File

@@ -5,7 +5,13 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"bytes"
"fmt"
"io"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
)
// defaultTransactionAlloc is the default size used for the backing array
@@ -40,7 +46,7 @@ type TxLoc struct {
// response to a getdata message (MsgGetData) for a given block hash.
type MsgBlock struct {
baseMessage
Header MsgBlockHeader
Header BlockHeader
Transactions []*MsgTx
}
@@ -54,6 +60,161 @@ func (msg *MsgBlock) ClearTransactions() {
msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding blocks stored to disk, such as in a database, as
// opposed to decoding blocks from the appmessage.
func (msg *MsgBlock) KaspaDecode(r io.Reader, pver uint32) error {
err := readBlockHeader(r, pver, &msg.Header)
if err != nil {
return err
}
txCount, err := ReadVarInt(r)
if err != nil {
return err
}
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, MaxTxPerBlock)
return messageError("MsgBlock.KaspaDecode", str)
}
msg.Transactions = make([]*MsgTx, 0, txCount)
for i := uint64(0); i < txCount; i++ {
tx := MsgTx{}
err := tx.KaspaDecode(r, pver)
if err != nil {
return err
}
msg.Transactions = append(msg.Transactions, &tx)
}
return nil
}
// Deserialize decodes a block from r into the receiver using a format that is
// suitable for long-term storage such as a database while respecting the
// Version field in the block. This function differs from KaspaDecode in that
// KaspaDecode decodes from the kaspa appmessage protocol as it was sent across the
// network. The appmessage encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block at
// all. As of the time this comment was written, the encoded block is the same
// in both instances, but there is a distinct difference and separating the two
// allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
}
// DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes
// a byte buffer instead of a generic reader and returns a slice containing the
// start and length of each transaction within the raw data that is being
// deserialized.
func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
fullLen := r.Len()
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of existing appmessage protocol functions.
err := readBlockHeader(r, 0, &msg.Header)
if err != nil {
return nil, err
}
txCount, err := ReadVarInt(r)
if err != nil {
return nil, err
}
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > MaxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, MaxTxPerBlock)
return nil, messageError("MsgBlock.DeserializeTxLoc", str)
}
// Deserialize each transaction while keeping track of its location
// within the byte stream.
msg.Transactions = make([]*MsgTx, 0, txCount)
txLocs := make([]TxLoc, txCount)
for i := uint64(0); i < txCount; i++ {
txLocs[i].TxStart = fullLen - r.Len()
tx := MsgTx{}
err := tx.Deserialize(r)
if err != nil {
return nil, err
}
msg.Transactions = append(msg.Transactions, &tx)
txLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart
}
return txLocs, nil
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding blocks to be stored to disk, such as in a
// database, as opposed to encoding blocks for the appmessage.
func (msg *MsgBlock) KaspaEncode(w io.Writer, pver uint32) error {
err := writeBlockHeader(w, pver, &msg.Header)
if err != nil {
return err
}
err = WriteVarInt(w, uint64(len(msg.Transactions)))
if err != nil {
return err
}
for _, tx := range msg.Transactions {
err = tx.KaspaEncode(w, pver)
if err != nil {
return err
}
}
return nil
}
// Serialize encodes the block to w using a format that is suitable for long-term
// storage such as a database while respecting the Version field in the block.
// This function differs from KaspaEncode in that KaspaEncode encodes the block to
// the kaspa appmessage protocol in order to be sent across the network. The appmessage
// encoding can technically differ depending on the protocol version and doesn't
// even really need to match the format of a stored block at all. As of the
// time this comment was written, the encoded block is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Serialize(w io.Writer) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
// SerializeSize returns the number of bytes it would take to serialize the
// block.
func (msg *MsgBlock) SerializeSize() int {
// Block header bytes + Serialized varint size for the number of
// transactions.
n := msg.Header.SerializeSize() + VarIntSerializeSize(uint64(len(msg.Transactions)))
for _, tx := range msg.Transactions {
n += tx.SerializeSize()
}
return n
}
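SerializeSize makes it cheap to pre-allocate the output buffer before calling Serialize. A minimal sketch (serializeBlock is illustrative, not part of the source):
package appmessage
import "bytes"
// serializeBlock serializes a block into a buffer sized up front with
// SerializeSize, avoiding reallocations while writing.
func serializeBlock(block *MsgBlock) ([]byte, error) {
	buf := bytes.NewBuffer(make([]byte, 0, block.SerializeSize()))
	if err := block.Serialize(buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}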
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlock) Command() MessageCommand {
@@ -66,12 +227,17 @@ func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {
return MaxMessagePayload
}
// BlockHash computes the block identifier hash for this block.
func (msg *MsgBlock) BlockHash() *daghash.Hash {
return msg.Header.BlockHash()
}
// ConvertToPartial clears out all the payloads of the subnetworks that are
// incompatible with the given subnetwork ID.
// Note: this operation modifies the block in place.
func (msg *MsgBlock) ConvertToPartial(subnetworkID *externalapi.DomainSubnetworkID) {
func (msg *MsgBlock) ConvertToPartial(subnetworkID *subnetworkid.SubnetworkID) {
for _, tx := range msg.Transactions {
if !tx.SubnetworkID.Equal(subnetworkID) {
if !tx.SubnetworkID.IsEqual(subnetworkID) {
tx.Payload = []byte{}
}
}
@@ -79,7 +245,7 @@ func (msg *MsgBlock) ConvertToPartial(subnetworkID *externalapi.DomainSubnetwork
// NewMsgBlock returns a new kaspa block message that conforms to the
// Message interface. See MsgBlock for details.
func NewMsgBlock(blockHeader *MsgBlockHeader) *MsgBlock {
func NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {
return &MsgBlock{
Header: *blockHeader,
Transactions: make([]*MsgTx, 0, defaultTransactionAlloc),


@@ -5,15 +5,17 @@
package appmessage
import (
"github.com/davecgh/go-spew/spew"
"bytes"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlock tests the MsgBlock API.
@@ -69,31 +71,46 @@ func TestBlock(t *testing.T) {
}
}
func TestConvertToPartial(t *testing.T) {
localSubnetworkID := &externalapi.DomainSubnetworkID{0x12}
// TestBlockHash tests the ability to generate the hash of a block accurately.
func TestBlockHash(t *testing.T) {
// Block 1 hash.
hashStr := "55d71bd49a8233bc9f0edbcbd0ad5d3eaebffe1fc6a6443a1c1f310fd02c11a5"
wantHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure the hash produced is expected.
blockHash := blockOne.BlockHash()
if !blockHash.IsEqual(wantHash) {
t.Errorf("BlockHash: wrong hash - got %v, want %v",
spew.Sprint(blockHash), spew.Sprint(wantHash))
}
}
func TestConvertToPartial(t *testing.T) {
transactions := []struct {
subnetworkID *externalapi.DomainSubnetworkID
subnetworkID *subnetworkid.SubnetworkID
payload []byte
expectedPayloadLength int
}{
{
subnetworkID: &subnetworks.SubnetworkIDNative,
subnetworkID: subnetworkid.SubnetworkIDNative,
payload: []byte{},
expectedPayloadLength: 0,
},
{
subnetworkID: &subnetworks.SubnetworkIDRegistry,
subnetworkID: subnetworkid.SubnetworkIDRegistry,
payload: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
expectedPayloadLength: 0,
},
{
subnetworkID: localSubnetworkID,
subnetworkID: &subnetworkid.SubnetworkID{123},
payload: []byte{0x01},
expectedPayloadLength: 1,
},
{
subnetworkID: &externalapi.DomainSubnetworkID{0x34},
subnetworkID: &subnetworkid.SubnetworkID{234},
payload: []byte{0x02},
expectedPayloadLength: 0,
},
@@ -105,33 +122,388 @@ func TestConvertToPartial(t *testing.T) {
block.Transactions = append(block.Transactions, NewSubnetworkMsgTx(1, nil, nil, transaction.subnetworkID, 0, payload))
}
block.ConvertToPartial(localSubnetworkID)
block.ConvertToPartial(&subnetworkid.SubnetworkID{123})
for _, testTransaction := range transactions {
for _, transaction := range transactions {
var subnetworkTx *MsgTx
for _, blockTransaction := range block.Transactions {
if blockTransaction.SubnetworkID.Equal(testTransaction.subnetworkID) {
subnetworkTx = blockTransaction
for _, tx := range block.Transactions {
if tx.SubnetworkID.IsEqual(transaction.subnetworkID) {
subnetworkTx = tx
}
}
if subnetworkTx == nil {
t.Errorf("ConvertToPartial: subnetworkID '%s' not found in block!", testTransaction.subnetworkID)
t.Errorf("ConvertToPartial: subnetworkID '%s' not found in block!", transaction.subnetworkID)
continue
}
payloadLength := len(subnetworkTx.Payload)
if payloadLength != testTransaction.expectedPayloadLength {
if payloadLength != transaction.expectedPayloadLength {
t.Errorf("ConvertToPartial: unexpected payload length for subnetwork '%s': expected: %d, got: %d",
testTransaction.subnetworkID, testTransaction.expectedPayloadLength, payloadLength)
transaction.subnetworkID, transaction.expectedPayloadLength, payloadLength)
}
}
}
//blockOne is the first block in the mainnet block DAG.
// TestBlockEncoding tests the MsgBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgBlock // Message to encode
out *MsgBlock // Expected decoded message
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
&blockOne,
&blockOne,
blockOneBytes,
blockOneTxLocs,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgBlock
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}
// TestBlockEncodingErrors performs negative tests against appmessage encode and decode
// of MsgBlock to confirm error paths work correctly.
func TestBlockEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgBlock // Value to encode
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{&blockOne, blockOneBytes, pver, 0, io.ErrShortWrite, io.EOF},
// Force error in num block hashes.
{&blockOne, blockOneBytes, pver, 4, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #1.
{&blockOne, blockOneBytes, pver, 5, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #2.
{&blockOne, blockOneBytes, pver, 37, io.ErrShortWrite, io.EOF},
// Force error in hash merkle root.
{&blockOne, blockOneBytes, pver, 69, io.ErrShortWrite, io.EOF},
// Force error in accepted ID merkle root.
{&blockOne, blockOneBytes, pver, 101, io.ErrShortWrite, io.EOF},
// Force error in utxo commitment.
{&blockOne, blockOneBytes, pver, 133, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
{&blockOne, blockOneBytes, pver, 165, io.ErrShortWrite, io.EOF},
// Force error in difficulty bits.
{&blockOne, blockOneBytes, pver, 173, io.ErrShortWrite, io.EOF},
// Force error in header nonce.
{&blockOne, blockOneBytes, pver, 177, io.ErrShortWrite, io.EOF},
// Force error in transaction count.
{&blockOne, blockOneBytes, pver, 185, io.ErrShortWrite, io.EOF},
// Force error in transactions.
{&blockOne, blockOneBytes, pver, 186, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.KaspaEncode(w, test.pver)
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
var msg MsgBlock
r := newFixedReader(test.max, test.buf)
err = msg.KaspaDecode(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestBlockSerialize tests MsgBlock serialize and deserialize.
func TestBlockSerialize(t *testing.T) {
tests := []struct {
in *MsgBlock // Message to encode
out *MsgBlock // Expected decoded message
buf []byte // Serialized data
txLocs []TxLoc // Expected transaction locations
}{
{
&blockOne,
&blockOne,
blockOneBytes,
blockOneTxLocs,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the block.
var buf bytes.Buffer
err := test.in.Serialize(&buf)
if err != nil {
t.Errorf("Serialize #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("Serialize #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Deserialize the block.
var block MsgBlock
rbuf := bytes.NewReader(test.buf)
err = block.Deserialize(rbuf)
if err != nil {
t.Errorf("Deserialize #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&block, test.out) {
t.Errorf("Deserialize #%d\n got: %s want: %s", i,
spew.Sdump(&block), spew.Sdump(test.out))
continue
}
// Deserialize the block while gathering transaction location
// information.
var txLocBlock MsgBlock
br := bytes.NewBuffer(test.buf)
txLocs, err := txLocBlock.DeserializeTxLoc(br)
if err != nil {
t.Errorf("DeserializeTxLoc #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&txLocBlock, test.out) {
t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
spew.Sdump(&txLocBlock), spew.Sdump(test.out))
continue
}
if !reflect.DeepEqual(txLocs, test.txLocs) {
t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i,
spew.Sdump(txLocs), spew.Sdump(test.txLocs))
continue
}
}
}
// TestBlockSerializeErrors performs negative tests against appmessage encode and
// decode of MsgBlock to confirm error paths work correctly.
func TestBlockSerializeErrors(t *testing.T) {
tests := []struct {
in *MsgBlock // Value to encode
buf []byte // Serialized data
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{&blockOne, blockOneBytes, 0, io.ErrShortWrite, io.EOF},
// Force error in numParentBlocks.
{&blockOne, blockOneBytes, 4, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #1.
{&blockOne, blockOneBytes, 5, io.ErrShortWrite, io.EOF},
// Force error in prev block hash #2.
{&blockOne, blockOneBytes, 37, io.ErrShortWrite, io.EOF},
// Force error in hash merkle root.
{&blockOne, blockOneBytes, 69, io.ErrShortWrite, io.EOF},
// Force error in accepted ID merkle root.
{&blockOne, blockOneBytes, 101, io.ErrShortWrite, io.EOF},
// Force error in utxo commitment.
{&blockOne, blockOneBytes, 133, io.ErrShortWrite, io.EOF},
// Force error in timestamp.
{&blockOne, blockOneBytes, 165, io.ErrShortWrite, io.EOF},
// Force error in difficulty bits.
{&blockOne, blockOneBytes, 173, io.ErrShortWrite, io.EOF},
// Force error in header nonce.
{&blockOne, blockOneBytes, 177, io.ErrShortWrite, io.EOF},
// Force error in transaction count.
{&blockOne, blockOneBytes, 185, io.ErrShortWrite, io.EOF},
// Force error in transactions.
{&blockOne, blockOneBytes, 186, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the block.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Deserialize the block.
var block MsgBlock
r := newFixedReader(test.max, test.buf)
err = block.Deserialize(r)
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
var txLocBlock MsgBlock
br := bytes.NewBuffer(test.buf[0:test.max])
_, err = txLocBlock.DeserializeTxLoc(br)
if !errors.Is(err, test.readErr) {
t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestBlockOverflowErrors performs tests to ensure that deserializing blocks which
// are intentionally crafted to use large values for the number of transactions
// is handled properly. This could otherwise potentially be used as an attack
// vector.
func TestBlockOverflowErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
err error // Expected error
}{
// Block that claims to have ~uint64(0) transactions.
{
[]byte{
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, // mainnetGenesisHash
0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash
0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0,
0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91,
0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68,
0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, // HashMerkleRoot
0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61,
0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32,
0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a,
0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment
0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87,
0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63,
0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F,
0x61, 0xbc, 0x66, 0x49, 0x00, 0x00, 0x00, 0x00, // Timestamp
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // TxnCount
}, pver, &MessageError{},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
var msg MsgBlock
r := bytes.NewReader(test.buf)
err := msg.KaspaDecode(r, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Deserialize from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Deserialize with transaction location info from appmessage format.
br := bytes.NewBuffer(test.buf)
_, err = msg.DeserializeTxLoc(br)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+
"want: %v", i, err, reflect.TypeOf(test.err))
continue
}
}
}
// TestBlockSerializeSize performs tests to ensure the serialize size for
// various blocks is accurate.
func TestBlockSerializeSize(t *testing.T) {
// Block with no transactions.
noTxBlock := NewMsgBlock(&blockOne.Header)
tests := []struct {
in *MsgBlock // Block to encode
size int // Expected serialized size
}{
// Block with no transactions.
{noTxBlock, 186},
// First block in the mainnet block DAG.
{&blockOne, len(blockOneBytes)},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := test.in.SerializeSize()
if serializedSize != test.size {
t.Errorf("MsgBlock.SerializeSize: #%d got: %d, want: "+
"%d", i, serializedSize, test.size)
continue
}
}
}
// blockOne is the first block in the mainnet block DAG.
var blockOne = MsgBlock{
Header: MsgBlockHeader{
Version: 0,
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
Header: BlockHeader{
Version: 1,
ParentHashes: []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot,
UTXOCommitment: exampleUTXOCommitment,
@@ -144,7 +516,7 @@ var blockOne = MsgBlock{
[]*TxIn{
{
PreviousOutpoint: Outpoint{
TxID: externalapi.DomainTransactionID{},
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
@@ -156,21 +528,19 @@ var blockOne = MsgBlock{
[]*TxOut{
{
Value: 0x12a05f200,
ScriptPubKey: &externalapi.ScriptPublicKey{
Script: []byte{
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
},
Version: 0},
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
0xee, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
}),
},
@@ -178,7 +548,7 @@ var blockOne = MsgBlock{
// Block one serialized bytes.
var blockOneBytes = []byte{
0x00, 0x00, // Version 0
0x01, 0x00, 0x00, 0x00, // Version 1
0x02, // NumParentBlocks
0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash
0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b,
@@ -204,7 +574,7 @@ var blockOneBytes = []byte{
0xff, 0xff, 0x00, 0x1d, // Bits
0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce
0x01, // TxnCount
0x00, 0x00, 0x00, 0x00, // Version
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of transaction inputs
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,


@@ -1,108 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"math"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
)
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
// not including the list of parent block headers.
// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes +
// + NumParentBlocks 1 byte + HashMerkleRoot hash +
// + AcceptedIDMerkleRoot hash + UTXOCommitment hash.
// To get the total size of a block header, len(ParentHashes) * externalapi.DomainHashSize should be
// added to this value
const BaseBlockHeaderPayload = 25 + 3*(externalapi.DomainHashSize)
// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
// Currently set to 255, the maximum value NumParentBlocks can hold, since it is a single byte
const MaxNumParentBlocks = 255
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * externalapi.DomainHashSize)
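For a sense of scale, assuming the 32-byte hash size kaspad uses for externalapi.DomainHashSize, BaseBlockHeaderPayload works out to 25 + 3*32 = 121 bytes, and MaxBlockHeaderPayload to 121 + 255*32 = 8,281 bytes.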
// MsgBlockHeader defines information about a block and is used in the kaspa
// block (MsgBlock) and headers (MsgHeader) messages.
type MsgBlockHeader struct {
baseMessage
// Version of the block. This is not the same as the protocol version.
Version uint16
// Hashes of the parent block headers in the blockDAG.
ParentHashes []*externalapi.DomainHash
// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
HashMerkleRoot *externalapi.DomainHash
// AcceptedIDMerkleRoot is a merkle tree reference to the hashes of all transactions
// accepted from the block.Blues
AcceptedIDMerkleRoot *externalapi.DomainHash
// UTXOCommitment is an ECMH UTXO commitment to the block UTXO.
UTXOCommitment *externalapi.DomainHash
// Time the block was created.
Timestamp mstime.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint64
}
// NumParentBlocks return the number of entries in ParentHashes
func (h *MsgBlockHeader) NumParentBlocks() byte {
numParents := len(h.ParentHashes)
if numParents > math.MaxUint8 {
panic(errors.Errorf("number of parents is %d, which is more than one byte can fit", numParents))
}
return byte(numParents)
}
// BlockHash computes the block identifier hash for the given block header.
func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
}
// IsGenesis returns true iff this block is a genesis block
func (h *MsgBlockHeader) IsGenesis() bool {
return h.NumParentBlocks() == 0
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (h *MsgBlockHeader) Command() MessageCommand {
return CmdHeader
}
// NewBlockHeader returns a new MsgBlockHeader using the provided version, parent
// block hashes, hash merkle root, accepted ID merkle root, UTXO commitment, difficulty bits, and nonce
// used to generate the block, with defaults or calculated values for the remaining fields.
func NewBlockHeader(version uint16, parentHashes []*externalapi.DomainHash, hashMerkleRoot *externalapi.DomainHash,
acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *MsgBlockHeader {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
return &MsgBlockHeader{
Version: version,
ParentHashes: parentHashes,
HashMerkleRoot: hashMerkleRoot,
AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
UTXOCommitment: utxoCommitment,
Timestamp: mstime.Now(),
Bits: bits,
Nonce: nonce,
}
}
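A usage sketch of this (pre-refactor) constructor, hashing a freshly built header; hashNewHeader is illustrative and not part of the source:
package appmessage
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// hashNewHeader builds a version-1 header from its parts and returns its hash.
func hashNewHeader(parents []*externalapi.DomainHash, hashMerkleRoot, acceptedIDMerkleRoot,
	utxoCommitment *externalapi.DomainHash, bits uint32, nonce uint64) *externalapi.DomainHash {
	// Timestamp defaults to mstime.Now() inside NewBlockHeader.
	header := NewBlockHeader(1, parents, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
	return header.BlockHash()
}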


@@ -1,84 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
)
// TestBlockHeader tests the MsgBlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce := uint64(0xba4d87a69924a93d)
hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}
merkleHash := mainnetGenesisMerkleRoot
acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot
bits := uint32(0x1d00ffff)
bh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)
// Ensure we get the same data back out.
if !reflect.DeepEqual(bh.ParentHashes, hashes) {
t.Errorf("NewBlockHeader: wrong prev hashes - got %v, want %v",
spew.Sprint(bh.ParentHashes), spew.Sprint(hashes))
}
if bh.HashMerkleRoot != merkleHash {
t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v",
spew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))
}
if bh.Bits != bits {
t.Errorf("NewBlockHeader: wrong bits - got %v, want %v",
bh.Bits, bits)
}
if bh.Nonce != nonce {
t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v",
bh.Nonce, nonce)
}
}
func TestIsGenesis(t *testing.T) {
nonce := uint64(123123) // 0x1e0f3
bits := uint32(0x1d00ffff)
timestamp := mstime.UnixMilliseconds(0x495fab29000)
baseBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
genesisBlockHdr := &MsgBlockHeader{
Version: 1,
ParentHashes: []*externalapi.DomainHash{},
HashMerkleRoot: mainnetGenesisMerkleRoot,
Timestamp: timestamp,
Bits: bits,
Nonce: nonce,
}
tests := []struct {
in *MsgBlockHeader // Block header to encode
isGenesis bool // Expected result for call of .IsGenesis
}{
{genesisBlockHdr, true},
{baseBlockHdr, false},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
isGenesis := test.in.IsGenesis()
if isGenesis != test.isGenesis {
t.Errorf("MsgBlockHeader.IsGenesis: #%d got: %t, want: %t",
i, isGenesis, test.isGenesis)
}
}
}


@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed
@@ -13,7 +13,7 @@ const MaxBlockLocatorsPerMsg = 500
// syncing with you.
type MsgBlockLocator struct {
baseMessage
BlockLocatorHashes []*externalapi.DomainHash
BlockLocatorHashes []*daghash.Hash
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgBlockLocator) Command() MessageCommand {
// NewMsgBlockLocator returns a new kaspa locator message that conforms to
// the Message interface. See MsgBlockLocator for details.
func NewMsgBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgBlockLocator {
func NewMsgBlockLocator(locatorHashes []*daghash.Hash) *MsgBlockLocator {
return &MsgBlockLocator{
BlockLocatorHashes: locatorHashes,
}


@@ -3,20 +3,19 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestBlockLocator tests the MsgBlockLocator API.
func TestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
locatorHash, err := externalapi.NewDomainHashFromString(hashStr)
locatorHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
msg := NewMsgBlockLocator([]*externalapi.DomainHash{locatorHash})
msg := NewMsgBlockLocator([]*daghash.Hash{locatorHash})
// Ensure the command is expected value.
wantCmd := MessageCommand(10)


@@ -1,22 +0,0 @@
package appmessage
// MsgDoneHeaders implements the Message interface and represents a kaspa
// DoneHeaders message. It is used to notify the IBD syncing peer that the
// syncer sent all the requested headers.
//
// This message has no payload.
type MsgDoneHeaders struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneHeaders) Command() MessageCommand {
return CmdDoneHeaders
}
// NewMsgDoneHeaders returns a new kaspa DoneHeaders message that conforms to the
// Message interface.
func NewMsgDoneHeaders() *MsgDoneHeaders {
return &MsgDoneHeaders{}
}


@@ -0,0 +1,22 @@
package appmessage
// MsgDoneIBDBlocks implements the Message interface and represents a kaspa
// DoneIBDBlocks message. It is used to notify the IBD syncing peer that the
// syncer sent all the requested blocks.
//
// This message has no payload.
type MsgDoneIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgDoneIBDBlocks) Command() MessageCommand {
return CmdDoneIBDBlocks
}
// NewMsgDoneIBDBlocks returns a new kaspa DoneIBDBlocks message that conforms to the
// Message interface.
func NewMsgDoneIBDBlocks() *MsgDoneIBDBlocks {
return &MsgDoneIBDBlocks{}
}


@@ -1,16 +0,0 @@
package appmessage
// MsgDonePruningPointUTXOSetChunks represents a kaspa DonePruningPointUTXOSetChunks message
type MsgDonePruningPointUTXOSetChunks struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgDonePruningPointUTXOSetChunks) Command() MessageCommand {
return CmdDonePruningPointUTXOSetChunks
}
// NewMsgDonePruningPointUTXOSetChunks returns a new MsgDonePruningPointUTXOSetChunks.
func NewMsgDonePruningPointUTXOSetChunks() *MsgDonePruningPointUTXOSetChunks {
return &MsgDonePruningPointUTXOSetChunks{}
}


@@ -5,6 +5,7 @@
package appmessage
import (
"bytes"
"reflect"
"testing"
@@ -25,7 +26,7 @@ func TestIBDBlock(t *testing.T) {
bh := NewBlockHeader(1, parentHashes, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce)
// Ensure the command is expected value.
wantCmd := MessageCommand(15)
wantCmd := MessageCommand(17)
msg := NewMsgIBDBlock(NewMsgBlock(bh))
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgIBDBlock: wrong command - got %v want %v",
@@ -63,3 +64,55 @@ func TestIBDBlock(t *testing.T) {
len(msg.Transactions), 0)
}
}
// TestIBDBlockEncoding tests the MsgIBDBlock appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestIBDBlockEncoding(t *testing.T) {
tests := []struct {
in *MsgIBDBlock // Message to encode
out *MsgIBDBlock // Expected decoded message
buf []byte // Encoded value
txLocs []TxLoc // Expected transaction locations
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version.
{
&MsgIBDBlock{MsgBlock: &blockOne},
&MsgIBDBlock{MsgBlock: &blockOne},
blockOneBytes,
blockOneTxLocs,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgIBDBlock
msg.MsgBlock = new(MsgBlock)
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}


@@ -1,27 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDBlockLocator represents a kaspa ibdBlockLocator message
type MsgIBDBlockLocator struct {
baseMessage
TargetHash *externalapi.DomainHash
BlockLocatorHashes []*externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocator) Command() MessageCommand {
return CmdIBDBlockLocator
}
// NewMsgIBDBlockLocator returns a new kaspa ibdBlockLocator message
func NewMsgIBDBlockLocator(targetHash *externalapi.DomainHash,
blockLocatorHashes []*externalapi.DomainHash) *MsgIBDBlockLocator {
return &MsgIBDBlockLocator{
TargetHash: targetHash,
BlockLocatorHashes: blockLocatorHashes,
}
}


@@ -1,23 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgIBDBlockLocatorHighestHash represents a kaspa BlockLocatorHighestHash message
type MsgIBDBlockLocatorHighestHash struct {
baseMessage
HighestHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocatorHighestHash) Command() MessageCommand {
return CmdIBDBlockLocatorHighestHash
}
// NewMsgIBDBlockLocatorHighestHash returns a new BlockLocatorHighestHash message
func NewMsgIBDBlockLocatorHighestHash(highestHash *externalapi.DomainHash) *MsgIBDBlockLocatorHighestHash {
return &MsgIBDBlockLocatorHighestHash{
HighestHash: highestHash,
}
}


@@ -1,16 +0,0 @@
package appmessage
// MsgIBDBlockLocatorHighestHashNotFound represents a kaspa BlockLocatorHighestHashNotFound message
type MsgIBDBlockLocatorHighestHashNotFound struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgIBDBlockLocatorHighestHashNotFound) Command() MessageCommand {
return CmdIBDBlockLocatorHighestHashNotFound
}
// NewMsgIBDBlockLocatorHighestHashNotFound returns a new IBDBlockLocatorHighestHashNotFound message
func NewMsgIBDBlockLocatorHighestHashNotFound() *MsgIBDBlockLocatorHighestHashNotFound {
return &MsgIBDBlockLocatorHighestHashNotFound{}
}


@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgInvRelayBlock implements the Message interface and represents a kaspa
@@ -9,7 +9,7 @@ import (
// by sending their hash, and let the receiving node decide if it needs it.
type MsgInvRelayBlock struct {
baseMessage
Hash *externalapi.DomainHash
Hash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
@@ -20,7 +20,7 @@ func (msg *MsgInvRelayBlock) Command() MessageCommand {
// NewMsgInvBlock returns a new kaspa invrelblk message that conforms to
// the Message interface. See MsgInvRelayBlock for details.
func NewMsgInvBlock(hash *externalapi.DomainHash) *MsgInvRelayBlock {
func NewMsgInvBlock(hash *daghash.Hash) *MsgInvRelayBlock {
return &MsgInvRelayBlock{
Hash: hash,
}


@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxInvPerTxInvMsg is the maximum number of hashes that can
@@ -13,7 +13,7 @@ const MaxInvPerTxInvMsg = MaxInvPerMsg
// by sending their ID, and let the receiving node decide if it needs it.
type MsgInvTransaction struct {
baseMessage
TxIDs []*externalapi.DomainTransactionID
TxIDs []*daghash.TxID
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgInvTransaction) Command() MessageCommand {
// NewMsgInvTransaction returns a new kaspa TxInv message that conforms to
// the Message interface. See MsgInvTransaction for details.
func NewMsgInvTransaction(ids []*externalapi.DomainTransactionID) *MsgInvTransaction {
func NewMsgInvTransaction(ids []*daghash.TxID) *MsgInvTransaction {
return &MsgInvTransaction{
TxIDs: ids,
}


@@ -6,12 +6,17 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
// Ensure we get the same nonce back out.
nonce := uint64(0x61c2c5535902862)
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
msg := NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",


@@ -6,11 +6,16 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPongLatest tests the MsgPong API against the latest protocol version.
func TestPongLatest(t *testing.T) {
nonce := uint64(0x1a05b581a5182c)
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: error generating nonce: %v", err)
}
msg := NewMsgPong(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",


@@ -1,23 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgPruningPointHashMessage represents a kaspa PruningPointHash message
type MsgPruningPointHashMessage struct {
baseMessage
Hash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPointHashMessage) Command() MessageCommand {
return CmdPruningPointHash
}
// NewPruningPointHashMessage returns a new kaspa PruningPointHash message
func NewPruningPointHashMessage(hash *externalapi.DomainHash) *MsgPruningPointHashMessage {
return &MsgPruningPointHashMessage{
Hash: hash,
}
}


@@ -1,36 +0,0 @@
package appmessage
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// MsgPruningPointUTXOSetChunk represents a kaspa PruningPointUTXOSetChunk message
type MsgPruningPointUTXOSetChunk struct {
baseMessage
OutpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair
}
// Command returns the protocol command string for the message
func (msg *MsgPruningPointUTXOSetChunk) Command() MessageCommand {
return CmdPruningPointUTXOSetChunk
}
// NewMsgPruningPointUTXOSetChunk returns a new MsgPruningPointUTXOSetChunk.
func NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) *MsgPruningPointUTXOSetChunk {
return &MsgPruningPointUTXOSetChunk{
OutpointAndUTXOEntryPairs: outpointAndUTXOEntryPairs,
}
}
// OutpointAndUTXOEntryPair is an outpoint along with its
// respective UTXO entry
type OutpointAndUTXOEntryPair struct {
Outpoint *Outpoint
UTXOEntry *UTXOEntry
}
// UTXOEntry houses details about an individual transaction output in a UTXO
type UTXOEntry struct {
Amount uint64
ScriptPublicKey *externalapi.ScriptPublicKey
BlockBlueScore uint64
IsCoinbase bool
}
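A sketch of how the chunk message is assembled from outpoint/UTXO-entry pairs (buildUTXOSetChunk is illustrative, not part of the source, and assumes both slices have the same length):
package appmessage
// buildUTXOSetChunk pairs up outpoints with their UTXO entries and wraps them
// in a MsgPruningPointUTXOSetChunk for streaming the pruning point UTXO set.
func buildUTXOSetChunk(outpoints []*Outpoint, entries []*UTXOEntry) *MsgPruningPointUTXOSetChunk {
	pairs := make([]*OutpointAndUTXOEntryPair, len(outpoints))
	for i := range outpoints {
		pairs[i] = &OutpointAndUTXOEntryPair{
			Outpoint:  outpoints[i],
			UTXOEntry: entries[i],
		}
	}
	return NewMsgPruningPointUTXOSetChunk(pairs)
}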


@@ -5,7 +5,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MsgRequestAddresses implements the Message interface and represents a kaspa
@@ -17,7 +17,7 @@ import (
type MsgRequestAddresses struct {
baseMessage
IncludeAllSubnetworks bool
SubnetworkID *externalapi.DomainSubnetworkID
SubnetworkID *subnetworkid.SubnetworkID
}
// Command returns the protocol command string for the message. This is part
@@ -28,7 +28,7 @@ func (msg *MsgRequestAddresses) Command() MessageCommand {
// NewMsgRequestAddresses returns a new kaspa RequestAddresses message that conforms to the
// Message interface. See MsgRequestAddresses for details.
func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *externalapi.DomainSubnetworkID) *MsgRequestAddresses {
func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) *MsgRequestAddresses {
return &MsgRequestAddresses{
IncludeAllSubnetworks: includeAllSubnetworks,
SubnetworkID: subnetworkID,


@@ -1,18 +1,17 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestBlockLocator implements the Message interface and represents a kaspa
// RequestBlockLocator message. It is used to request a block locator between low
// and high hash.
// RequestBlockLocator message. It is used to request a block locator between high
// and low hash.
// The locator is returned via a locator message (MsgBlockLocator).
type MsgRequestBlockLocator struct {
baseMessage
LowHash *externalapi.DomainHash
HighHash *externalapi.DomainHash
Limit uint32
HighHash *daghash.Hash
LowHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
@@ -24,10 +23,9 @@ func (msg *MsgRequestBlockLocator) Command() MessageCommand {
// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequestBlockLocator(lowHash, highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator {
func NewMsgRequestBlockLocator(highHash, lowHash *daghash.Hash) *MsgRequestBlockLocator {
return &MsgRequestBlockLocator{
LowHash: lowHash,
HighHash: highHash,
Limit: limit,
LowHash: lowHash,
}
}


@@ -3,20 +3,20 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestRequestBlockLocator tests the MsgRequestBlockLocator API.
func TestRequestBlockLocator(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
highHash, err := externalapi.NewDomainHashFromString(hashStr)
highHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(9)
msg := NewMsgRequestBlockLocator(highHash, &externalapi.DomainHash{}, 0)
msg := NewMsgRequestBlockLocator(highHash, &daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v",
cmd, wantCmd)


@@ -1,34 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestHeaders implements the Message interface and represents a kaspa
// RequestHeaders message. It is used to request a list of block headers starting after the
// low hash and up to the high hash.
type MsgRequestHeaders struct {
baseMessage
LowHash *externalapi.DomainHash
HighHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestHeaders) Command() MessageCommand {
return CmdRequestHeaders
}
// NewMsgRequstHeaders returns a new kaspa RequestHeaders message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequstHeaders(lowHash, highHash *externalapi.DomainHash) *MsgRequestHeaders {
return &MsgRequestHeaders{
LowHash: lowHash,
HighHash: highHash,
}
}

View File

@@ -1,15 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgRequestIBDBlocks implements the Message interface and represents a kaspa
// RequestIBDBlocks message. It is used to request blocks as part of the IBD
// protocol.
// RequestIBDBlocks message. It is used to request a list of blocks starting after the
// low hash and until the high hash.
type MsgRequestIBDBlocks struct {
baseMessage
Hashes []*externalapi.DomainHash
LowHash *daghash.Hash
HighHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
@@ -18,9 +23,12 @@ func (msg *MsgRequestIBDBlocks) Command() MessageCommand {
return CmdRequestIBDBlocks
}
// NewMsgRequestIBDBlocks returns a new MsgRequestIBDBlocks.
func NewMsgRequestIBDBlocks(hashes []*externalapi.DomainHash) *MsgRequestIBDBlocks {
// NewMsgRequstIBDBlocks returns a new kaspa RequestIBDBlocks message that conforms to the
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgRequstIBDBlocks(lowHash, highHash *daghash.Hash) *MsgRequestIBDBlocks {
return &MsgRequestIBDBlocks{
Hashes: hashes,
LowHash: lowHash,
HighHash: highHash,
}
}


@@ -7,34 +7,34 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// TestRequstIBDBlocks tests the MsgRequestHeaders API.
// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API.
func TestRequstIBDBlocks(t *testing.T) {
hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0"
lowHash, err := externalapi.NewDomainHashFromString(hashStr)
lowHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
hashStr = "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
highHash, err := externalapi.NewDomainHashFromString(hashStr)
highHash, err := daghash.NewHashFromStr(hashStr)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Ensure we get the same data back out.
msg := NewMsgRequstHeaders(lowHash, highHash)
if !msg.HighHash.Equal(highHash) {
t.Errorf("NewMsgRequstHeaders: wrong high hash - got %v, want %v",
msg := NewMsgRequstIBDBlocks(lowHash, highHash)
if !msg.HighHash.IsEqual(highHash) {
t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v",
msg.HighHash, highHash)
}
// Ensure the command is expected value.
wantCmd := MessageCommand(4)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequstHeaders: wrong command - got %v want %v",
t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -1,22 +0,0 @@
package appmessage
// MsgRequestNextHeaders implements the Message interface and represents a kaspa
// RequestNextHeaders message. It is used to notify the IBD syncer peer to send
// more headers.
//
// This message has no payload.
type MsgRequestNextHeaders struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextHeaders) Command() MessageCommand {
return CmdRequestNextHeaders
}
// NewMsgRequestNextHeaders returns a new kaspa RequestNextHeaders message that conforms to the
// Message interface.
func NewMsgRequestNextHeaders() *MsgRequestNextHeaders {
return &MsgRequestNextHeaders{}
}


@@ -0,0 +1,22 @@
package appmessage
// MsgRequestNextIBDBlocks implements the Message interface and represents a kaspa
// RequestNextIBDBlocks message. It is used to notify the IBD syncer peer to send
// more blocks.
//
// This message has no payload.
type MsgRequestNextIBDBlocks struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestNextIBDBlocks) Command() MessageCommand {
return CmdRequestNextIBDBlocks
}
// NewMsgRequestNextIBDBlocks returns a new kaspa RequestNextIBDBlocks message that conforms to the
// Message interface.
func NewMsgRequestNextIBDBlocks() *MsgRequestNextIBDBlocks {
return &MsgRequestNextIBDBlocks{}
}


@@ -1,16 +0,0 @@
package appmessage
// MsgRequestNextPruningPointUTXOSetChunk represents a kaspa RequestNextPruningPointUTXOSetChunk message
type MsgRequestNextPruningPointUTXOSetChunk struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestNextPruningPointUTXOSetChunk) Command() MessageCommand {
return CmdRequestNextPruningPointUTXOSetChunk
}
// NewMsgRequestNextPruningPointUTXOSetChunk returns a new MsgRequestNextPruningPointUTXOSetChunk.
func NewMsgRequestNextPruningPointUTXOSetChunk() *MsgRequestNextPruningPointUTXOSetChunk {
return &MsgRequestNextPruningPointUTXOSetChunk{}
}


@@ -1,23 +0,0 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// MsgRequestPruningPointUTXOSetAndBlock represents a kaspa RequestPruningPointUTXOSetAndBlock message
type MsgRequestPruningPointUTXOSetAndBlock struct {
baseMessage
PruningPointHash *externalapi.DomainHash
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointUTXOSetAndBlock) Command() MessageCommand {
return CmdRequestPruningPointUTXOSetAndBlock
}
// NewMsgRequestPruningPointUTXOSetAndBlock returns a new MsgRequestPruningPointUTXOSetAndBlock
func NewMsgRequestPruningPointUTXOSetAndBlock(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSetAndBlock {
return &MsgRequestPruningPointUTXOSetAndBlock{
PruningPointHash: pruningPointHash,
}
}


@@ -1,19 +1,19 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxRequestRelayBlocksHashes is the maximum number of hashes that can
// MsgRequestRelayBlocksHashes is the maximum number of hashes that can
// be in a single RequestRelayBlocks message.
const MaxRequestRelayBlocksHashes = MaxInvPerMsg
const MsgRequestRelayBlocksHashes = MaxInvPerMsg
// MsgRequestRelayBlocks implements the Message interface and represents a kaspa
// RequestRelayBlocks message. It is used to request blocks as part of the block
// relay protocol.
type MsgRequestRelayBlocks struct {
baseMessage
Hashes []*externalapi.DomainHash
Hashes []*daghash.Hash
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgRequestRelayBlocks) Command() MessageCommand {
// NewMsgRequestRelayBlocks returns a new kaspa RequestRelayBlocks message that conforms to
// the Message interface. See MsgRequestRelayBlocks for details.
func NewMsgRequestRelayBlocks(hashes []*externalapi.DomainHash) *MsgRequestRelayBlocks {
func NewMsgRequestRelayBlocks(hashes []*daghash.Hash) *MsgRequestRelayBlocks {
return &MsgRequestRelayBlocks{
Hashes: hashes,
}


@@ -0,0 +1,21 @@
package appmessage
// MsgRequestSelectedTip implements the Message interface and represents a kaspa
// RequestSelectedTip message. It is used to request the selected tip of another peer.
//
// This message has no payload.
type MsgRequestSelectedTip struct {
baseMessage
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgRequestSelectedTip) Command() MessageCommand {
return CmdRequestSelectedTip
}
// NewMsgRequestSelectedTip returns a new kaspa RequestSelectedTip message that conforms to the
// Message interface.
func NewMsgRequestSelectedTip() *MsgRequestSelectedTip {
return &MsgRequestSelectedTip{}
}


@@ -0,0 +1,20 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package appmessage
import (
"testing"
)
// TestRequestSelectedTip tests the MsgRequestSelectedTip API.
func TestRequestSelectedTip(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(12)
msg := NewMsgRequestSelectedTip()
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgRequestSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -1,7 +1,7 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MaxInvPerRequestTransactionsMsg is the maximum number of hashes that can
@@ -13,7 +13,7 @@ const MaxInvPerRequestTransactionsMsg = MaxInvPerMsg
// transactions relay protocol.
type MsgRequestTransactions struct {
baseMessage
IDs []*externalapi.DomainTransactionID
IDs []*daghash.TxID
}
// Command returns the protocol command string for the message. This is part
@@ -24,7 +24,7 @@ func (msg *MsgRequestTransactions) Command() MessageCommand {
// NewMsgRequestTransactions returns a new kaspa RequestTransactions message that conforms to
// the Message interface. See MsgRequestTransactions for details.
func NewMsgRequestTransactions(ids []*externalapi.DomainTransactionID) *MsgRequestTransactions {
func NewMsgRequestTransactions(ids []*daghash.TxID) *MsgRequestTransactions {
return &MsgRequestTransactions{
IDs: ids,
}


@@ -0,0 +1,28 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgSelectedTip implements the Message interface and represents a kaspa
// selectedtip message. It is used to answer getseltip messages and tell
// the asking peer what is the selected tip of this peer.
type MsgSelectedTip struct {
baseMessage
// The selected tip hash of the generator of the message.
SelectedTipHash *daghash.Hash
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgSelectedTip) Command() MessageCommand {
return CmdSelectedTip
}
// NewMsgSelectedTip returns a new kaspa selectedtip message that conforms to the
// Message interface.
func NewMsgSelectedTip(selectedTipHash *daghash.Hash) *MsgSelectedTip {
return &MsgSelectedTip{
SelectedTipHash: selectedTipHash,
}
}

View File

@@ -0,0 +1,18 @@
package appmessage
import (
"github.com/kaspanet/kaspad/util/daghash"
"testing"
)
// TestSelectedTip tests the MsgSelectedTip API.
func TestSelectedTip(t *testing.T) {
// Ensure the command is expected value.
wantCmd := MessageCommand(11)
msg := NewMsgSelectedTip(&daghash.ZeroHash)
if cmd := msg.Command(); cmd != wantCmd {
t.Errorf("NewMsgSelectedTip: wrong command - got %v want %v",
cmd, wantCmd)
}
}


@@ -5,14 +5,14 @@
package appmessage
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
)
// MsgTransactionNotFound defines a kaspa TransactionNotFound message which is sent in response to
// a RequestTransactions message if any of the requested data is not available on the peer.
type MsgTransactionNotFound struct {
baseMessage
ID *externalapi.DomainTransactionID
ID *daghash.TxID
}
// Command returns the protocol command string for the message. This is part
@@ -23,7 +23,7 @@ func (msg *MsgTransactionNotFound) Command() MessageCommand {
// NewMsgTransactionNotFound returns a new kaspa transactionsnotfound message that conforms to the
// Message interface. See MsgTransactionNotFound for details.
func NewMsgTransactionNotFound(id *externalapi.DomainTransactionID) *MsgTransactionNotFound {
func NewMsgTransactionNotFound(id *daghash.TxID) *MsgTransactionNotFound {
return &MsgTransactionNotFound{
ID: id,
}


@@ -6,21 +6,49 @@ package appmessage
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
"fmt"
"io"
"math"
"strconv"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
const (
// TxVersion is the current latest supported transaction version.
TxVersion = 1
// MaxTxInSequenceNum is the maximum sequence number the sequence field
// of a transaction input can be.
MaxTxInSequenceNum uint64 = math.MaxUint64
// MaxPrevOutIndex is the maximum index the index field of a previous
// outpoint can be.
MaxPrevOutIndex uint32 = 0xffffffff
// SequenceLockTimeDisabled is a flag that if set on a transaction
// input's sequence number, the sequence number will not be interpreted
// as a relative locktime.
SequenceLockTimeDisabled = 1 << 31
// SequenceLockTimeIsSeconds is a flag that if set on a transaction
// input's sequence number, the relative locktime has units of 512
// seconds.
SequenceLockTimeIsSeconds = 1 << 22
// SequenceLockTimeMask is a mask that extracts the relative locktime
// when masked against the transaction input sequence number.
SequenceLockTimeMask = 0x0000ffff
// SequenceLockTimeGranularity is the defined time based granularity
// for milliseconds-based relative time locks. When converting from milliseconds
// to a sequence number, the value is right shifted by this amount,
// therefore the granularity of relative time locks is 524288 or 2^19
// milliseconds. Enforced relative lock times are multiples of 524288 milliseconds.
SequenceLockTimeGranularity = 19
// defaultTxInOutAlloc is the default size used for the backing array for
// transaction inputs and outputs. The array will dynamically grow as needed,
// but this figure is intended to provide enough space for the number of
@@ -31,15 +59,15 @@ const (
// minTxInPayload is the minimum payload size for a transaction input.
// PreviousOutpoint.TxID + PreviousOutpoint.Index 4 bytes + Varint for
// SignatureScript length 1 byte + Sequence 4 bytes.
minTxInPayload = 9 + externalapi.DomainHashSize
minTxInPayload = 9 + daghash.HashSize
// maxTxInPerMessage is the maximum number of transactions inputs that
// a transaction which fits into a message could possibly have.
maxTxInPerMessage = (MaxMessagePayload / minTxInPayload) + 1
// MinTxOutPayload is the minimum payload size for a transaction output.
// Value 8 bytes + version 2 bytes + Varint for ScriptPublicKey length 1 byte.
MinTxOutPayload = 11
// Value 8 bytes + Varint for ScriptPubKey length 1 byte.
MinTxOutPayload = 9
// maxTxOutPerMessage is the maximum number of transactions outputs that
// a transaction which fits into a message could possibly have.
@@ -53,18 +81,102 @@ const (
// number of transaction outputs 1 byte + LockTime 4 bytes + min input
// payload + min output payload.
minTxPayload = 10
// freeListMaxScriptSize is the size of each buffer in the free list
// that is used for deserializing scripts from the appmessage before they are
// concatenated into a single contiguous buffers. This value was chosen
// because it is slightly more than twice the size of the vast majority
// of all "standard" scripts. Larger scripts are still deserialized
// properly as the free list will simply be bypassed for them.
freeListMaxScriptSize = 512
// freeListMaxItems is the number of buffers to keep in the free list
// to use for script deserialization. This value allows up to 100
// scripts per transaction being simultaneously deserialized by 125
// peers. Thus, the peak usage of the free list is 12,500 * 512 =
// 6,400,000 bytes.
freeListMaxItems = 12500
)
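A hedged sketch of how the sequence-lock constants above are conventionally combined; the helper name is an assumption and the exact consensus rules live elsewhere. The disable bit is tested first, then the masked value is shifted back up from the 2^19-millisecond granularity.
// relativeLockTimeMilliseconds is a hypothetical helper, not part of this file.
func relativeLockTimeMilliseconds(sequence uint64) (milliseconds uint64, enforced bool) {
	if sequence&SequenceLockTimeDisabled != 0 {
		// The disable bit is set, so no relative locktime applies to this input.
		return 0, false
	}
	// Mask out the locktime value and undo the right shift described above.
	return (sequence & SequenceLockTimeMask) << SequenceLockTimeGranularity, true
}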
// txEncoding is a bitmask defining which transaction fields we
// want to encode and which to ignore.
type txEncoding uint8
const (
txEncodingFull txEncoding = 0
txEncodingExcludePayload txEncoding = 1 << iota
txEncodingExcludeSignatureScript
)
// scriptFreeList defines a free list of byte slices (up to the maximum number
// defined by the freeListMaxItems constant) that have a cap according to the
// freeListMaxScriptSize constant. It is used to provide temporary buffers for
// deserializing scripts in order to greatly reduce the number of allocations
// required.
//
// The caller can obtain a buffer from the free list by calling the Borrow
// function and should return it via the Return function when done using it.
type scriptFreeList chan []byte
// Borrow returns a byte slice from the free list with a length according to the
// provided size. A new buffer is allocated if there aren't any items available.
//
// When the size is larger than the max size allowed for items on the free list
// a new buffer of the appropriate size is allocated and returned. It is safe
// to attempt to return said buffer via the Return function as it will be
// ignored and allowed to go to the garbage collector.
func (c scriptFreeList) Borrow(size uint64) []byte {
if size > freeListMaxScriptSize {
return make([]byte, size)
}
var buf []byte
select {
case buf = <-c:
default:
buf = make([]byte, freeListMaxScriptSize)
}
return buf[:size]
}
// Return puts the provided byte slice back on the free list when it has a cap
// of the expected length. The buffer is expected to have been obtained via
// the Borrow function. Any slices that are not of the appropriate size, such
// as those whose size is greater than the largest allowed free list item size,
// are simply ignored so they can go to the garbage collector.
func (c scriptFreeList) Return(buf []byte) {
// Ignore any buffers returned that aren't the expected size for the
// free list.
if cap(buf) != freeListMaxScriptSize {
return
}
// Return the buffer to the free list when it's not full. Otherwise let
// it be garbage collected.
select {
case c <- buf:
default:
// Let it go to the garbage collector.
}
}
// Create the concurrent safe free list to use for script deserialization. As
// previously described, this free list is maintained to significantly reduce
// the number of allocations.
var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems)
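As an illustration of the Borrow/Return contract above, here is a hypothetical helper (readScriptInto is not part of this file): it borrows a buffer for an exact size, copies the data out, and always hands the buffer back.
func readScriptInto(r io.Reader, size uint64) ([]byte, error) {
	buf := scriptPool.Borrow(size) // reused buffer when size <= freeListMaxScriptSize
	if _, err := io.ReadFull(r, buf); err != nil {
		scriptPool.Return(buf) // always return borrowed buffers, even on error
		return nil, err
	}
	out := make([]byte, len(buf))
	copy(out, buf)         // copy out before releasing the shared buffer
	scriptPool.Return(buf) // oversized buffers are silently dropped by Return
	return out, nil
}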
// Outpoint defines a kaspa data type that is used to track previous
// transaction outputs.
type Outpoint struct {
TxID externalapi.DomainTransactionID
TxID daghash.TxID
Index uint32
}
// NewOutpoint returns a new kaspa transaction outpoint point with the
// provided hash and index.
func NewOutpoint(txID *externalapi.DomainTransactionID, index uint32) *Outpoint {
func NewOutpoint(txID *daghash.TxID, index uint32) *Outpoint {
return &Outpoint{
TxID: *txID,
Index: index,
@@ -79,9 +191,9 @@ func (o Outpoint) String() string {
// maximum message payload may increase in the future and this
// optimization may go unnoticed, so allocate space for 10 decimal
// digits, which will fit any uint32.
buf := make([]byte, 2*externalapi.DomainHashSize+1, 2*externalapi.DomainHashSize+1+10)
buf := make([]byte, 2*daghash.HashSize+1, 2*daghash.HashSize+1+10)
copy(buf, o.TxID.String())
buf[2*externalapi.DomainHashSize] = ':'
buf[2*daghash.HashSize] = ':'
buf = strconv.AppendUint(buf, uint64(o.Index), 10)
return string(buf)
}
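For example, an outpoint at index 1 renders as the 64-character hex transaction ID followed by ":1", which is why the buffer above is pre-sized to twice the hash size plus room for a colon and up to 10 decimal digits.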
@@ -93,26 +205,55 @@ type TxIn struct {
Sequence uint64
}
// SerializeSize returns the number of bytes it would take to serialize
// the transaction input.
func (t *TxIn) SerializeSize() int {
return t.serializeSize(txEncodingFull)
}
func (t *TxIn) serializeSize(encodingFlags txEncoding) int {
// Outpoint ID 32 bytes + Outpoint Index 4 bytes + Sequence 8 bytes +
// serialized varint size for the length of SignatureScript +
// SignatureScript bytes.
return 44 + serializeSignatureScriptSize(t.SignatureScript, encodingFlags)
}
func serializeSignatureScriptSize(signatureScript []byte, encodingFlags txEncoding) int {
if encodingFlags&txEncodingExcludeSignatureScript != txEncodingExcludeSignatureScript {
return VarIntSerializeSize(uint64(len(signatureScript))) +
len(signatureScript)
}
return VarIntSerializeSize(0)
}
// NewTxIn returns a new kaspa transaction input with the provided
// previous outpoint point and signature script with a default sequence of
// MaxTxInSequenceNum.
func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64) *TxIn {
func NewTxIn(prevOut *Outpoint, signatureScript []byte) *TxIn {
return &TxIn{
PreviousOutpoint: *prevOut,
SignatureScript: signatureScript,
Sequence: sequence,
Sequence: MaxTxInSequenceNum,
}
}
// TxOut defines a kaspa transaction output.
type TxOut struct {
Value uint64
ScriptPubKey *externalapi.ScriptPublicKey
ScriptPubKey []byte
}
// SerializeSize returns the number of bytes it would take to serialize
// the transaction output.
func (t *TxOut) SerializeSize() int {
// Value 8 bytes + serialized varint size for the length of ScriptPubKey +
// ScriptPubKey bytes.
return 8 + VarIntSerializeSize(uint64(len(t.ScriptPubKey))) + len(t.ScriptPubKey)
}
// NewTxOut returns a new kaspa transaction output with the provided
// transaction value and public key script.
func NewTxOut(value uint64, scriptPubKey *externalapi.ScriptPublicKey) *TxOut {
func NewTxOut(value uint64, scriptPubKey []byte) *TxOut {
return &TxOut{
Value: value,
ScriptPubKey: scriptPubKey,
@@ -127,13 +268,13 @@ func NewTxOut(value uint64, scriptPubKey *externalapi.ScriptPublicKey) *TxOut {
// inputs and outputs.
type MsgTx struct {
baseMessage
Version uint16
Version int32
TxIn []*TxIn
TxOut []*TxOut
LockTime uint64
SubnetworkID externalapi.DomainSubnetworkID
SubnetworkID subnetworkid.SubnetworkID
Gas uint64
PayloadHash externalapi.DomainHash
PayloadHash *daghash.Hash
Payload []byte
}
@@ -154,17 +295,41 @@ func (msg *MsgTx) AddTxOut(to *TxOut) {
// value and reference the relevant block id, instead of previous transaction id.
func (msg *MsgTx) IsCoinBase() bool {
// A coinbase transaction must have subnetwork id SubnetworkIDCoinbase
return msg.SubnetworkID == subnetworks.SubnetworkIDCoinbase
return msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
}
// TxHash generates the Hash for the transaction.
func (msg *MsgTx) TxHash() *externalapi.DomainHash {
return consensushashing.TransactionHash(MsgTxToDomainTransaction(msg))
func (msg *MsgTx) TxHash() *daghash.Hash {
// Encode the transaction and calculate double sha256 on the result.
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, txEncodingExcludePayload)
if err != nil {
// this writer never returns errors (no allocations or possible failures), so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxHash() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
hash := writer.Finalize()
return &hash
}
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
func (msg *MsgTx) TxID() *externalapi.DomainTransactionID {
return consensushashing.TransactionID(MsgTxToDomainTransaction(msg))
func (msg *MsgTx) TxID() *daghash.TxID {
// Encode the transaction, replace signature script with zeroes, cut off
// payload and calculate double sha256 on the result.
var encodingFlags txEncoding
if !msg.IsCoinBase() {
encodingFlags = txEncodingExcludeSignatureScript | txEncodingExcludePayload
}
writer := daghash.NewDoubleHashWriter()
err := msg.serialize(writer, encodingFlags)
if err != nil {
// this writer never returns errors (no allocations or possible failures), so errors can only come from validity checks,
// and we assume we never construct malformed transactions.
panic(fmt.Sprintf("TxID() failed. this should never fail for structurally-valid transactions. err: %+v", err))
}
txID := daghash.TxID(writer.Finalize())
return &txID
}
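A hedged sketch of the practical difference (txIDIsStableUnderSignatureChange is a hypothetical helper; it assumes a non-coinbase transaction with at least one input): clearing a signature script changes TxHash but leaves TxID untouched, because the ID is computed with signature scripts excluded.
func txIDIsStableUnderSignatureChange(tx *MsgTx) bool {
	idBefore := *tx.TxID()
	tx.TxIn[0].SignatureScript = nil // mutate the signature script...
	return idBefore == *tx.TxID()    // ...the ID of a non-coinbase tx stays the same
}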
// Copy creates a deep copy of a transaction so that the original does not get
@@ -192,7 +357,7 @@ func (msg *MsgTx) Copy() *MsgTx {
// Deep copy the old previous outpoint.
oldOutpoint := oldTxIn.PreviousOutpoint
newOutpoint := Outpoint{}
newOutpoint.TxID = oldOutpoint.TxID
newOutpoint.TxID.SetBytes(oldOutpoint.TxID[:])
newOutpoint.Index = oldOutpoint.Index
// Deep copy the old signature script.
@@ -217,20 +382,20 @@ func (msg *MsgTx) Copy() *MsgTx {
// Deep copy the old TxOut data.
for _, oldTxOut := range msg.TxOut {
// Deep copy the old ScriptPublicKey
var newScript externalapi.ScriptPublicKey
// Deep copy the old ScriptPubKey
var newScript []byte
oldScript := oldTxOut.ScriptPubKey
oldScriptLen := len(oldScript.Script)
oldScriptLen := len(oldScript)
if oldScriptLen > 0 {
newScript = externalapi.ScriptPublicKey{Script: make([]byte, oldScriptLen), Version: oldScript.Version}
copy(newScript.Script, oldScript.Script[:oldScriptLen])
newScript = make([]byte, oldScriptLen)
copy(newScript, oldScript[:oldScriptLen])
}
// Create new txOut with the deep copied data and append it to
// new Tx.
newTxOut := TxOut{
Value: oldTxOut.Value,
ScriptPubKey: &newScript,
ScriptPubKey: newScript,
}
newTx.TxOut = append(newTx.TxOut, &newTxOut)
}
@@ -238,6 +403,368 @@ func (msg *MsgTx) Copy() *MsgTx {
return &newTx
}
// KaspaDecode decodes r using the kaspa protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the appmessage.
func (msg *MsgTx) KaspaDecode(r io.Reader, pver uint32) error {
version, err := binaryserializer.Uint32(r, littleEndian)
if err != nil {
return err
}
msg.Version = int32(version)
count, err := ReadVarInt(r)
if err != nil {
return err
}
// Prevent more input transactions than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxInPerMessage) {
str := fmt.Sprintf("too many input transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxInPerMessage)
return messageError("MsgTx.KaspaDecode", str)
}
// returnScriptBuffers is a closure that returns any script buffers that
// were borrowed from the pool when there are any deserialization
// errors. This is only valid to call before the final step which
// replaces the scripts with the location in a contiguous buffer and
// returns them.
returnScriptBuffers := func() {
for _, txIn := range msg.TxIn {
if txIn == nil || txIn.SignatureScript == nil {
continue
}
scriptPool.Return(txIn.SignatureScript)
}
for _, txOut := range msg.TxOut {
if txOut == nil || txOut.ScriptPubKey == nil {
continue
}
scriptPool.Return(txOut.ScriptPubKey)
}
}
// Deserialize the inputs.
var totalScriptSize uint64
txIns := make([]TxIn, count)
msg.TxIn = make([]*TxIn, count)
for i := uint64(0); i < count; i++ {
// The pointer is set now in case a script buffer is borrowed
// and needs to be returned to the pool on error.
ti := &txIns[i]
msg.TxIn[i] = ti
err = readTxIn(r, pver, msg.Version, ti)
if err != nil {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(ti.SignatureScript))
}
count, err = ReadVarInt(r)
if err != nil {
returnScriptBuffers()
return err
}
// Prevent more output transactions than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxOutPerMessage) {
returnScriptBuffers()
str := fmt.Sprintf("too many output transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxOutPerMessage)
return messageError("MsgTx.KaspaDecode", str)
}
// Deserialize the outputs.
txOuts := make([]TxOut, count)
msg.TxOut = make([]*TxOut, count)
for i := uint64(0); i < count; i++ {
// The pointer is set now in case a script buffer is borrowed
// and needs to be returned to the pool on error.
to := &txOuts[i]
msg.TxOut[i] = to
err = readTxOut(r, pver, msg.Version, to)
if err != nil {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(to.ScriptPubKey))
}
lockTime, err := binaryserializer.Uint64(r, littleEndian)
msg.LockTime = lockTime
if err != nil {
returnScriptBuffers()
return err
}
_, err = io.ReadFull(r, msg.SubnetworkID[:])
if err != nil {
returnScriptBuffers()
return err
}
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
msg.Gas, err = binaryserializer.Uint64(r, littleEndian)
if err != nil {
returnScriptBuffers()
return err
}
var payloadHash daghash.Hash
err = ReadElement(r, &payloadHash)
if err != nil {
returnScriptBuffers()
return err
}
msg.PayloadHash = &payloadHash
payloadLength, err := ReadVarInt(r)
if err != nil {
returnScriptBuffers()
return err
}
msg.Payload = make([]byte, payloadLength)
_, err = io.ReadFull(r, msg.Payload)
if err != nil {
returnScriptBuffers()
return err
}
}
// Create a single allocation to house all of the scripts and set each
// input signature script and output public key script to the
// appropriate subslice of the overall contiguous buffer. Then, return
// each individual script buffer back to the pool so they can be reused
// for future deserializations. This is done because it significantly
// reduces the number of allocations the garbage collector needs to
// track, which in turn improves performance and drastically reduces the
// amount of runtime overhead that would otherwise be needed to keep
// track of millions of small allocations.
//
// NOTE: It is no longer valid to call the returnScriptBuffers closure
// after these blocks of code run because it is already done and the
// scripts in the transaction inputs and outputs no longer point to the
// buffers.
var offset uint64
scripts := make([]byte, totalScriptSize)
for i := 0; i < len(msg.TxIn); i++ {
// Copy the signature script into the contiguous buffer at the
// appropriate offset.
signatureScript := msg.TxIn[i].SignatureScript
copy(scripts[offset:], signatureScript)
// Reset the signature script of the transaction input to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(signatureScript))
end := offset + scriptSize
msg.TxIn[i].SignatureScript = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(signatureScript)
}
for i := 0; i < len(msg.TxOut); i++ {
// Copy the public key script into the contiguous buffer at the
// appropriate offset.
scriptPubKey := msg.TxOut[i].ScriptPubKey
copy(scripts[offset:], scriptPubKey)
// Reset the public key script of the transaction output to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(scriptPubKey))
end := offset + scriptSize
msg.TxOut[i].ScriptPubKey = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(scriptPubKey)
}
return nil
}
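The consolidation step above can be illustrated in isolation; a simplified sketch (consolidate is a hypothetical helper) that copies many small script slices into one backing array and re-slices each entry out of it, so the garbage collector tracks a single allocation.
func consolidate(scripts [][]byte) {
	var total int
	for _, script := range scripts {
		total += len(script)
	}
	backing := make([]byte, total)
	offset := 0
	for i, script := range scripts {
		copy(backing[offset:], script)
		end := offset + len(script)
		// The full slice expression caps the entry so appends cannot grow
		// into the neighbouring script.
		scripts[i] = backing[offset:end:end]
		offset = end
	}
}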
// Deserialize decodes a transaction from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field in the transaction. This function differs from KaspaDecode
// in that KaspaDecode decodes from the kaspa appmessage protocol as it was sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaDecode.
return msg.KaspaDecode(r, 0)
}
// KaspaEncode encodes the receiver to w using the kaspa protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the appmessage.
func (msg *MsgTx) KaspaEncode(w io.Writer, pver uint32) error {
return msg.encode(w, pver, txEncodingFull)
}
func (msg *MsgTx) encode(w io.Writer, pver uint32, encodingFlags txEncoding) error {
err := binaryserializer.PutUint32(w, littleEndian, uint32(msg.Version))
if err != nil {
return err
}
count := uint64(len(msg.TxIn))
err = WriteVarInt(w, count)
if err != nil {
return err
}
for _, ti := range msg.TxIn {
err = writeTxIn(w, pver, msg.Version, ti, encodingFlags)
if err != nil {
return err
}
}
count = uint64(len(msg.TxOut))
err = WriteVarInt(w, count)
if err != nil {
return err
}
for _, to := range msg.TxOut {
err = WriteTxOut(w, pver, msg.Version, to)
if err != nil {
return err
}
}
err = binaryserializer.PutUint64(w, littleEndian, msg.LockTime)
if err != nil {
return err
}
_, err = w.Write(msg.SubnetworkID[:])
if err != nil {
return err
}
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
if msg.SubnetworkID.IsBuiltIn() && msg.Gas != 0 {
str := "Transactions from built-in should have 0 gas"
return messageError("MsgTx.KaspaEncode", str)
}
err = binaryserializer.PutUint64(w, littleEndian, msg.Gas)
if err != nil {
return err
}
err = WriteElement(w, msg.PayloadHash)
if err != nil {
return err
}
if encodingFlags&txEncodingExcludePayload != txEncodingExcludePayload {
err = WriteVarInt(w, uint64(len(msg.Payload)))
w.Write(msg.Payload)
} else {
err = WriteVarInt(w, 0)
}
if err != nil {
return err
}
} else if msg.Payload != nil {
str := "Transactions from native subnetwork should have <nil> payload"
return messageError("MsgTx.KaspaEncode", str)
} else if msg.PayloadHash != nil {
str := "Transactions from native subnetwork should have <nil> payload hash"
return messageError("MsgTx.KaspaEncode", str)
} else if msg.Gas != 0 {
str := "Transactions from native subnetwork should have 0 gas"
return messageError("MsgTx.KaspaEncode", str)
}
return nil
}
// Serialize encodes the transaction to w using a format that is suitable for
// long-term storage such as a database while respecting the Version field in
// the transaction. This function differs from KaspaEncode in that KaspaEncode
// encodes the transaction to the kaspa appmessage protocol in order to be sent
// across the network. The appmessage encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Serialize(w io.Writer) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of KaspaEncode.
return msg.KaspaEncode(w, 0)
}
func (msg *MsgTx) serialize(w io.Writer, encodingFlags txEncoding) error {
// At the current time, there is no difference between the appmessage encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of `encode`.
return msg.encode(w, 0, encodingFlags)
}
// SerializeSize returns the number of bytes it would take to serialize
// the transaction.
func (msg *MsgTx) SerializeSize() int {
return msg.serializeSize(txEncodingFull)
}
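A hedged roundtrip sketch (roundTrip is a hypothetical helper and assumes a "bytes" import, which this file does not currently have): size the buffer with SerializeSize, Serialize into it, then Deserialize into a fresh MsgTx.
func roundTrip(tx *MsgTx) (*MsgTx, error) {
	var buf bytes.Buffer
	buf.Grow(tx.SerializeSize()) // avoid reallocation while writing
	if err := tx.Serialize(&buf); err != nil {
		return nil, err
	}
	decoded := new(MsgTx)
	if err := decoded.Deserialize(bytes.NewReader(buf.Bytes())); err != nil {
		return nil, err
	}
	return decoded, nil
}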
// serializeSize returns the number of bytes it would take to serialize
// the transaction, respecting the given encoding flags.
func (msg *MsgTx) serializeSize(encodingFlags txEncoding) int {
// Version 4 bytes + LockTime 8 bytes + SubnetworkID 20
// bytes + Serialized varint size for the number of transaction
// inputs and outputs.
n := 32 + VarIntSerializeSize(uint64(len(msg.TxIn))) +
VarIntSerializeSize(uint64(len(msg.TxOut)))
if !msg.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
// Gas 8 bytes
n += 8
// PayloadHash
n += daghash.HashSize
// Serialized varint size for the length of the payload
if encodingFlags&txEncodingExcludePayload != txEncodingExcludePayload {
n += VarIntSerializeSize(uint64(len(msg.Payload)))
n += len(msg.Payload)
} else {
n += VarIntSerializeSize(0)
}
}
for _, txIn := range msg.TxIn {
n += txIn.serializeSize(encodingFlags)
}
for _, txOut := range msg.TxOut {
n += txOut.SerializeSize()
}
return n
}
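For an empty native transaction this works out to 32 + 1 + 1 = 34 bytes: Version 4 + LockTime 8 + SubnetworkID 20, plus one-byte varints for the zero input and output counts. This matches the noTx case in TestTxSerializeSize further down.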
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTx) Command() MessageCommand {
@@ -250,14 +777,52 @@ func (msg *MsgTx) MaxPayloadLength(pver uint32) uint32 {
return MaxMessagePayload
}
// ScriptPubKeyLocs returns a slice containing the start of each public key script
// within the raw serialized transaction. The caller can easily obtain the
// length of each script by using len on the script available via the
// appropriate transaction output entry.
func (msg *MsgTx) ScriptPubKeyLocs() []int {
numTxOut := len(msg.TxOut)
if numTxOut == 0 {
return nil
}
// The starting offset in the serialized transaction of the first
// transaction output is:
//
// Version 4 bytes + serialized varint size for the number of
// transaction inputs and outputs + serialized size of each transaction
// input.
n := 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) +
VarIntSerializeSize(uint64(numTxOut))
for _, txIn := range msg.TxIn {
n += txIn.SerializeSize()
}
// Calculate and set the appropriate offset for each public key script.
scriptPubKeyLocs := make([]int, numTxOut)
for i, txOut := range msg.TxOut {
// The offset of the script in the transaction output is:
//
// Value 8 bytes + serialized varint size for the length of
// ScriptPubKey.
n += 8 + VarIntSerializeSize(uint64(len(txOut.ScriptPubKey)))
scriptPubKeyLocs[i] = n
n += len(txOut.ScriptPubKey)
}
return scriptPubKeyLocs
}
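A hedged sketch of how the offsets are typically consumed (extractScripts is a hypothetical helper written against the []byte ScriptPubKey form on this side of the diff; serialized is assumed to hold the output of Serialize for the same transaction):
func extractScripts(tx *MsgTx, serialized []byte) [][]byte {
	locs := tx.ScriptPubKeyLocs()
	scripts := make([][]byte, len(locs))
	for i, loc := range locs {
		// Each script starts at its recorded offset and runs for the length
		// of the corresponding output's ScriptPubKey.
		scripts[i] = serialized[loc : loc+len(tx.TxOut[i].ScriptPubKey)]
	}
	return scripts
}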
// IsSubnetworkCompatible returns true iff subnetworkID is one of the following:
// 1. The SupportsAll subnetwork (full node)
// 2. The native subnetwork
// 3. The transaction's subnetwork
func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *externalapi.DomainSubnetworkID) bool {
func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *subnetworkid.SubnetworkID) bool {
return subnetworkID == nil ||
subnetworkID.Equal(&subnetworks.SubnetworkIDNative) ||
subnetworkID.Equal(&msg.SubnetworkID)
subnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) ||
subnetworkID.IsEqual(&msg.SubnetworkID)
}
// newMsgTx returns a new tx message that conforms to the Message interface.
@@ -269,7 +834,7 @@ func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *externalapi.DomainSubnetw
// The payload hash is calculated automatically according to the provided payload.
// Also, the lock time is set to zero to indicate the transaction is valid
// immediately as opposed to some time in the future.
func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *externalapi.DomainSubnetworkID,
func newMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkID *subnetworkid.SubnetworkID,
gas uint64, payload []byte, lockTime uint64) *MsgTx {
if txIn == nil {
@@ -280,9 +845,9 @@ func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *extern
txOut = make([]*TxOut, 0, defaultTxInOutAlloc)
}
var payloadHash externalapi.DomainHash
if *subnetworkID != subnetworks.SubnetworkIDNative {
payloadHash = *hashes.PayloadHash(payload)
var payloadHash *daghash.Hash
if !subnetworkID.IsEqual(subnetworkid.SubnetworkIDNative) {
payloadHash = daghash.DoubleHashP(payload)
}
return &MsgTx{
@@ -298,12 +863,12 @@ func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *extern
}
// NewNativeMsgTx returns a new tx message in the native subnetwork
func NewNativeMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut) *MsgTx {
return newMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDNative, 0, nil, 0)
func NewNativeMsgTx(version int32, txIn []*TxIn, txOut []*TxOut) *MsgTx {
return newMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDNative, 0, nil, 0)
}
// NewSubnetworkMsgTx returns a new tx message in the specified subnetwork with specified gas and payload
func NewSubnetworkMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *externalapi.DomainSubnetworkID,
func NewSubnetworkMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, subnetworkID *subnetworkid.SubnetworkID,
gas uint64, payload []byte) *MsgTx {
return newMsgTx(version, txIn, txOut, subnetworkID, gas, payload, 0)
@@ -312,14 +877,128 @@ func NewSubnetworkMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetwork
// NewNativeMsgTxWithLocktime returns a new tx message in the native subnetwork with a locktime.
//
// See newMsgTx for further documentation of the parameters
func NewNativeMsgTxWithLocktime(version uint16, txIn []*TxIn, txOut []*TxOut, locktime uint64) *MsgTx {
return newMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDNative, 0, nil, locktime)
func NewNativeMsgTxWithLocktime(version int32, txIn []*TxIn, txOut []*TxOut, locktime uint64) *MsgTx {
return newMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDNative, 0, nil, locktime)
}
// NewRegistryMsgTx creates a new MsgTx that registers a new subnetwork
func NewRegistryMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, gasLimit uint64) *MsgTx {
func NewRegistryMsgTx(version int32, txIn []*TxIn, txOut []*TxOut, gasLimit uint64) *MsgTx {
payload := make([]byte, 8)
binary.LittleEndian.PutUint64(payload, gasLimit)
return NewSubnetworkMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDRegistry, 0, payload)
return NewSubnetworkMsgTx(version, txIn, txOut, subnetworkid.SubnetworkIDRegistry, 0, payload)
}
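A hedged end-to-end sketch of the constructors (buildSimpleTx is a hypothetical helper; prevOut, sigScript, scriptPubKey and the amount are assumed caller-supplied values, and the two-argument NewTxIn / []byte ScriptPubKey form from this side of the diff is used):
func buildSimpleTx(prevOut *Outpoint, sigScript, scriptPubKey []byte, amount uint64) *MsgTx {
	txIn := NewTxIn(prevOut, sigScript) // Sequence defaults to MaxTxInSequenceNum
	txOut := NewTxOut(amount, scriptPubKey)
	return NewNativeMsgTx(1, []*TxIn{txIn}, []*TxOut{txOut})
}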
// readOutpoint reads the next sequence of bytes from r as an Outpoint.
func readOutpoint(r io.Reader, pver uint32, version int32, op *Outpoint) error {
_, err := io.ReadFull(r, op.TxID[:])
if err != nil {
return err
}
op.Index, err = binaryserializer.Uint32(r, littleEndian)
return err
}
// writeOutpoint encodes op to the kaspa protocol encoding for an Outpoint
// to w.
func writeOutpoint(w io.Writer, pver uint32, version int32, op *Outpoint) error {
_, err := w.Write(op.TxID[:])
if err != nil {
return err
}
return binaryserializer.PutUint32(w, littleEndian, op.Index)
}
// readScript reads a variable length byte array that represents a transaction
// script. It is encoded as a varInt containing the length of the array
// followed by the bytes themselves. An error is returned if the length is
// greater than the passed maxAllowed parameter which helps protect against
// memory exhaustion attacks and forced panics through malformed messages. The
// fieldName parameter is only used for the error message so it provides more
// context in the error.
func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) {
count, err := ReadVarInt(r)
if err != nil {
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
str := fmt.Sprintf("%s is larger than the max allowed size "+
"[count %d, max %d]", fieldName, count, maxAllowed)
return nil, messageError("readScript", str)
}
b := scriptPool.Borrow(count)
_, err = io.ReadFull(r, b)
if err != nil {
scriptPool.Return(b)
return nil, err
}
return b, nil
}
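For example, a varint that claims a script of ~uint64(0) bytes is rejected here with a MessageError before any allocation happens; TestTxOverflowErrors in the accompanying test file exercises exactly this path.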
// readTxIn reads the next sequence of bytes from r as a transaction input
// (TxIn).
func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error {
err := readOutpoint(r, pver, version, &ti.PreviousOutpoint)
if err != nil {
return err
}
ti.SignatureScript, err = readScript(r, pver, MaxMessagePayload,
"transaction input signature script")
if err != nil {
return err
}
return ReadElement(r, &ti.Sequence)
}
// writeTxIn encodes ti to the kaspa protocol encoding for a transaction
// input (TxIn) to w.
func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn, encodingFlags txEncoding) error {
err := writeOutpoint(w, pver, version, &ti.PreviousOutpoint)
if err != nil {
return err
}
if encodingFlags&txEncodingExcludeSignatureScript != txEncodingExcludeSignatureScript {
err = WriteVarBytes(w, pver, ti.SignatureScript)
} else {
err = WriteVarBytes(w, pver, []byte{})
}
if err != nil {
return err
}
return binaryserializer.PutUint64(w, littleEndian, ti.Sequence)
}
// readTxOut reads the next sequence of bytes from r as a transaction output
// (TxOut).
func readTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error {
err := ReadElement(r, &to.Value)
if err != nil {
return err
}
to.ScriptPubKey, err = readScript(r, pver, MaxMessagePayload,
"transaction output public key script")
return err
}
// WriteTxOut encodes to into the kaspa protocol encoding for a transaction
// output (TxOut) to w.
func WriteTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error {
err := binaryserializer.PutUint64(w, littleEndian, uint64(to.Value))
if err != nil {
return err
}
return WriteVarBytes(w, pver, to.ScriptPubKey)
}

View File

@@ -7,17 +7,16 @@ package appmessage
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"io"
"math"
"reflect"
"testing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
"unsafe"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// TestTx tests the MsgTx API.
@@ -25,7 +24,7 @@ func TestTx(t *testing.T) {
pver := ProtocolVersion
txIDStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
txID, err := transactionid.FromString(txIDStr)
txID, err := daghash.NewTxIDFromStr(txIDStr)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
}
@@ -52,7 +51,7 @@ func TestTx(t *testing.T) {
// testing package functionality.
prevOutIndex := uint32(1)
prevOut := NewOutpoint(txID, prevOutIndex)
if !prevOut.TxID.Equal(txID) {
if !prevOut.TxID.IsEqual(txID) {
t.Errorf("NewOutpoint: wrong ID - got %v, want %v",
spew.Sprint(&prevOut.TxID), spew.Sprint(txID))
}
@@ -68,7 +67,7 @@ func TestTx(t *testing.T) {
// Ensure we get the same transaction input back out.
sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}
txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum)
txIn := NewTxIn(prevOut, sigScript)
if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) {
t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v",
spew.Sprint(&txIn.PreviousOutpoint),
@@ -82,28 +81,26 @@ func TestTx(t *testing.T) {
// Ensure we get the same transaction output back out.
txValue := uint64(5000000000)
scriptPubKey := &externalapi.ScriptPublicKey{
Script: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
Version: 0}
scriptPubKey := []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
}
txOut := NewTxOut(txValue, scriptPubKey)
if txOut.Value != txValue {
t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v",
txOut.Value, txValue)
}
if !bytes.Equal(txOut.ScriptPubKey.Script, scriptPubKey.Script) {
if !bytes.Equal(txOut.ScriptPubKey, scriptPubKey) {
t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v",
spew.Sdump(txOut.ScriptPubKey),
spew.Sdump(scriptPubKey))
@@ -133,21 +130,17 @@ func TestTx(t *testing.T) {
// TestTxHashAndID tests the ability to generate the hash and ID of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
txHash1Str := "4bee9ee495bd93a755de428376bd582a2bb6ec37c041753b711c0606d5745c13"
txID1Str := "f868bd20e816256b80eac976821be4589d24d21141bd1cec6e8005d0c16c6881"
wantTxID1, err := transactionid.FromString(txID1Str)
txID1Str := "edca872f27279674c7a52192b32fd68b8b8be714bfea52d98b2c3c86c30e85c6"
wantTxID1, err := daghash.NewTxIDFromStr(txID1Str)
if err != nil {
t.Fatalf("NewTxIDFromStr: %v", err)
}
wantTxHash1, err := transactionid.FromString(txHash1Str)
if err != nil {
t.Fatalf("NewTxIDFromStr: %v", err)
t.Errorf("NewTxIDFromStr: %v", err)
return
}
// A coinbase transaction
txIn := &TxIn{
PreviousOutpoint: Outpoint{
TxID: externalapi.DomainTransactionID{},
TxID: daghash.TxID{},
Index: math.MaxUint32,
},
SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
@@ -155,7 +148,7 @@ func TestTxHashAndID(t *testing.T) {
}
txOut := &TxOut{
Value: 5000000000,
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
@@ -167,33 +160,33 @@ func TestTxHashAndID(t *testing.T) {
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
}, Version: 0},
},
}
tx1 := NewSubnetworkMsgTx(0, []*TxIn{txIn}, []*TxOut{txOut}, &subnetworks.SubnetworkIDCoinbase, 0, nil)
tx1 := NewSubnetworkMsgTx(1, []*TxIn{txIn}, []*TxOut{txOut}, subnetworkid.SubnetworkIDCoinbase, 0, nil)
// Ensure the hash produced is expected.
tx1Hash := tx1.TxHash()
if *tx1Hash != (externalapi.DomainHash)(*wantTxHash1) {
if !tx1Hash.IsEqual((*daghash.Hash)(wantTxID1)) {
t.Errorf("TxHash: wrong hash - got %v, want %v",
spew.Sprint(tx1Hash), spew.Sprint(wantTxHash1))
spew.Sprint(tx1Hash), spew.Sprint(wantTxID1))
}
// Ensure the TxID for coinbase transaction is the same as TxHash.
tx1ID := tx1.TxID()
if !tx1ID.Equal(wantTxID1) {
if !tx1ID.IsEqual(wantTxID1) {
t.Errorf("TxID: wrong ID - got %v, want %v",
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
}
hash2Str := "cb1bdb4a83d4885535fb3cceb5c96597b7df903db83f0ffcd779d703affd8efd"
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
hash2Str := "b11924b7eeffea821522222576c53dc5b8ddd97602f81e5e124d2626646d74ca"
wantHash2, err := daghash.NewHashFromStr(hash2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
return
}
id2Str := "ca080073d4ddf5b84443a0964af633f3c70a5b290fd3bc35a7e6f93fd33f9330"
wantID2, err := transactionid.FromString(id2Str)
id2Str := "750499ae9e6d44961ef8bad8af27a44dd4bcbea166b71baf181e8d3997e1ff72"
wantID2, err := daghash.NewTxIDFromStr(id2Str)
if err != nil {
t.Errorf("NewTxIDFromStr: %v", err)
return
@@ -202,7 +195,7 @@ func TestTxHashAndID(t *testing.T) {
txIns := []*TxIn{{
PreviousOutpoint: Outpoint{
Index: 0,
TxID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{1, 2, 3}),
TxID: daghash.TxID{1, 2, 3},
},
SignatureScript: []byte{
0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xDA, 0x0D, 0xC6, 0xAE, 0xCE, 0xFE, 0x1E, 0x06, 0xEF, 0xDF,
@@ -220,42 +213,730 @@ func TestTxHashAndID(t *testing.T) {
txOuts := []*TxOut{
{
Value: 244623243,
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
ScriptPubKey: []byte{
0x76, 0xA9, 0x14, 0xBA, 0xDE, 0xEC, 0xFD, 0xEF, 0x05, 0x07, 0x24, 0x7F, 0xC8, 0xF7, 0x42, 0x41,
0xD7, 0x3B, 0xC0, 0x39, 0x97, 0x2D, 0x7B, 0x88, 0xAC,
}, Version: 0},
},
},
{
Value: 44602432,
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
ScriptPubKey: []byte{
0x76, 0xA9, 0x14, 0xC1, 0x09, 0x32, 0x48, 0x3F, 0xEC, 0x93, 0xED, 0x51, 0xF5, 0xFE, 0x95, 0xE7,
0x25, 0x59, 0xF2, 0xCC, 0x70, 0x43, 0xF9, 0x88, 0xAC,
}, Version: 0},
},
},
}
tx2 := NewSubnetworkMsgTx(1, txIns, txOuts, &externalapi.DomainSubnetworkID{1, 2, 3}, 0, payload)
tx2 := NewSubnetworkMsgTx(1, txIns, txOuts, &subnetworkid.SubnetworkID{1, 2, 3}, 0, payload)
// Ensure the hash produced is expected.
tx2Hash := tx2.TxHash()
if !tx2Hash.Equal(wantHash2) {
if !tx2Hash.IsEqual(wantHash2) {
t.Errorf("TxHash: wrong hash - got %v, want %v",
spew.Sprint(tx2Hash), spew.Sprint(wantHash2))
}
// Ensure the TxID produced is as expected.
tx2ID := tx2.TxID()
if !tx2ID.Equal(wantID2) {
if !tx2ID.IsEqual(wantID2) {
t.Errorf("TxID: wrong ID - got %v, want %v",
spew.Sprint(tx2ID), spew.Sprint(wantID2))
}
if tx2ID.Equal((*externalapi.DomainTransactionID)(tx2Hash)) {
if tx2ID.IsEqual((*daghash.TxID)(tx2Hash)) {
t.Errorf("tx2ID and tx2Hash shouldn't be the same for non-coinbase transaction with signature and/or payload")
}
tx2.TxIn[0].SignatureScript = []byte{}
newTx2Hash := tx2.TxHash()
if *tx2ID == (externalapi.DomainTransactionID)(*newTx2Hash) {
t.Errorf("tx2ID and newTx2Hash should not be the same even for transaction with an empty signature")
if !tx2ID.IsEqual((*daghash.TxID)(newTx2Hash)) {
t.Errorf("tx2ID and newTx2Hash should be the same for transaction with an empty signature")
}
}
// TestTxEncoding tests the MsgTx appmessage encode and decode for various numbers
// of transaction inputs and outputs and protocol versions.
func TestTxEncoding(t *testing.T) {
// Empty tx message.
noTx := NewNativeMsgTx(1, nil, nil)
noTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
tests := []struct {
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
}{
// Latest protocol version with no transactions.
{
noTx,
noTx,
noTxEncoded,
ProtocolVersion,
},
// Latest protocol version with multiple transactions.
{
multiTx,
multiTx,
multiTxEncoded,
ProtocolVersion,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode the message to appmessage format.
var buf bytes.Buffer
err := test.in.KaspaEncode(&buf, test.pver)
if err != nil {
t.Errorf("KaspaEncode #%d error %v", i, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("KaspaEncode #%d\n got: %s want: %s", i,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Decode the message from appmessage format.
var msg MsgTx
rbuf := bytes.NewReader(test.buf)
err = msg.KaspaDecode(rbuf, test.pver)
if err != nil {
t.Errorf("KaspaDecode #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&msg, test.out) {
t.Errorf("KaspaDecode #%d\n got: %s want: %s", i,
spew.Sdump(&msg), spew.Sdump(test.out))
continue
}
}
}
// TestTxEncodingErrors performs negative tests against appmessage encode and decode
// of MsgTx to confirm error paths work correctly.
func TestTxEncodingErrors(t *testing.T) {
pver := ProtocolVersion
tests := []struct {
in *MsgTx // Value to encode
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{multiTx, multiTxEncoded, pver, 0, io.ErrShortWrite, io.EOF},
// Force error in number of transaction inputs.
{multiTx, multiTxEncoded, pver, 4, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block hash.
{multiTx, multiTxEncoded, pver, 5, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block output index.
{multiTx, multiTxEncoded, pver, 37, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script length.
{multiTx, multiTxEncoded, pver, 41, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script.
{multiTx, multiTxEncoded, pver, 42, io.ErrShortWrite, io.EOF},
// Force error in transaction input sequence.
{multiTx, multiTxEncoded, pver, 49, io.ErrShortWrite, io.EOF},
// Force error in number of transaction outputs.
{multiTx, multiTxEncoded, pver, 57, io.ErrShortWrite, io.EOF},
// Force error in transaction output value.
{multiTx, multiTxEncoded, pver, 58, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey length.
{multiTx, multiTxEncoded, pver, 66, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey.
{multiTx, multiTxEncoded, pver, 67, io.ErrShortWrite, io.EOF},
// Force error in transaction output lock time.
{multiTx, multiTxEncoded, pver, 210, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Encode to appmessage format.
w := newFixedWriter(test.max)
err := test.in.KaspaEncode(w, test.pver)
if !errors.Is(err, test.writeErr) {
t.Errorf("KaspaEncode #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Decode from appmessage format.
var msg MsgTx
r := newFixedReader(test.max, test.buf)
err = msg.KaspaDecode(r, test.pver)
if !errors.Is(err, test.readErr) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
}
// TestTxSerialize tests MsgTx serialize and deserialize.
func TestTxSerialize(t *testing.T) {
noTx := NewNativeMsgTx(1, nil, nil)
noTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
registryTx := NewRegistryMsgTx(1, nil, nil, 16)
registryTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Gas
0x77, 0x56, 0x36, 0xb4, 0x89, 0x32, 0xe9, 0xa8,
0xbb, 0x67, 0xe6, 0x54, 0x84, 0x36, 0x93, 0x8d,
0x9f, 0xc5, 0x62, 0x49, 0x79, 0x5c, 0x0d, 0x0a,
0x86, 0xaf, 0x7c, 0x5d, 0x54, 0x45, 0x4c, 0x4b, // Payload hash
0x08, // Payload length varint
0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Payload / Gas limit
}
subnetworkTx := NewSubnetworkMsgTx(1, nil, nil, &subnetworkid.SubnetworkID{0xff}, 5, []byte{0, 1, 2})
subnetworkTxEncoded := []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x00, // Varint for number of input transactions
0x00, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Gas
0x35, 0xf9, 0xf2, 0x93, 0x0e, 0xa3, 0x44, 0x61,
0x88, 0x22, 0x79, 0x5e, 0xee, 0xc5, 0x68, 0xae,
0x67, 0xab, 0x29, 0x87, 0xd8, 0xb1, 0x9e, 0x45,
0x91, 0xe1, 0x05, 0x27, 0xba, 0xa1, 0xdf, 0x3d, // Payload hash
0x03, // Payload length varint
0x00, 0x01, 0x02, // Payload
}
tests := []struct {
name string
in *MsgTx // Message to encode
out *MsgTx // Expected decoded message
buf []byte // Serialized data
scriptPubKeyLocs []int // Expected output script locations
}{
// No transactions.
{
"noTx",
noTx,
noTx,
noTxEncoded,
nil,
},
// Registry Transaction.
{
"registryTx",
registryTx,
registryTx,
registryTxEncoded,
nil,
},
// Sub Network Transaction.
{
"subnetworkTx",
subnetworkTx,
subnetworkTx,
subnetworkTxEncoded,
nil,
},
// Multiple transactions.
{
"multiTx",
multiTx,
multiTx,
multiTxEncoded,
multiTxScriptPubKeyLocs,
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the transaction.
var buf bytes.Buffer
err := test.in.Serialize(&buf)
if err != nil {
t.Errorf("Serialize %s: error %v", test.name, err)
continue
}
if !bytes.Equal(buf.Bytes(), test.buf) {
t.Errorf("Serialize %s:\n got: %s want: %s", test.name,
spew.Sdump(buf.Bytes()), spew.Sdump(test.buf))
continue
}
// Deserialize the transaction.
var tx MsgTx
rbuf := bytes.NewReader(test.buf)
err = tx.Deserialize(rbuf)
if err != nil {
t.Errorf("Deserialize #%d error %v", i, err)
continue
}
if !reflect.DeepEqual(&tx, test.out) {
t.Errorf("Deserialize #%d\n got: %s want: %s", i,
spew.Sdump(&tx), spew.Sdump(test.out))
continue
}
// Ensure the public key script locations are accurate.
scriptPubKeyLocs := test.in.ScriptPubKeyLocs()
if !reflect.DeepEqual(scriptPubKeyLocs, test.scriptPubKeyLocs) {
t.Errorf("ScriptPubKeyLocs #%d\n got: %s want: %s", i,
spew.Sdump(scriptPubKeyLocs),
spew.Sdump(test.scriptPubKeyLocs))
continue
}
for j, loc := range scriptPubKeyLocs {
wantScriptPubKey := test.in.TxOut[j].ScriptPubKey
gotScriptPubKey := test.buf[loc : loc+len(wantScriptPubKey)]
if !bytes.Equal(gotScriptPubKey, wantScriptPubKey) {
t.Errorf("ScriptPubKeyLocs #%d:%d\n unexpected "+
"script got: %s want: %s", i, j,
spew.Sdump(gotScriptPubKey),
spew.Sdump(wantScriptPubKey))
}
}
}
}
// TestTxSerializeErrors performs negative tests against serialize and deserialize
// of MsgTx to confirm error paths work correctly.
func TestTxSerializeErrors(t *testing.T) {
tests := []struct {
in *MsgTx // Value to encode
buf []byte // Serialized data
max int // Max size of fixed buffer to induce errors
writeErr error // Expected write error
readErr error // Expected read error
}{
// Force error in version.
{multiTx, multiTxEncoded, 0, io.ErrShortWrite, io.EOF},
// Force error in number of transaction inputs.
{multiTx, multiTxEncoded, 4, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block hash.
{multiTx, multiTxEncoded, 5, io.ErrShortWrite, io.EOF},
// Force error in transaction input previous block output index.
{multiTx, multiTxEncoded, 37, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script length.
{multiTx, multiTxEncoded, 41, io.ErrShortWrite, io.EOF},
// Force error in transaction input signature script.
{multiTx, multiTxEncoded, 42, io.ErrShortWrite, io.EOF},
// Force error in transaction input sequence.
{multiTx, multiTxEncoded, 49, io.ErrShortWrite, io.EOF},
// Force error in number of transaction outputs.
{multiTx, multiTxEncoded, 57, io.ErrShortWrite, io.EOF},
// Force error in transaction output value.
{multiTx, multiTxEncoded, 58, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey length.
{multiTx, multiTxEncoded, 66, io.ErrShortWrite, io.EOF},
// Force error in transaction output scriptPubKey.
{multiTx, multiTxEncoded, 67, io.ErrShortWrite, io.EOF},
// Force error in transaction output lock time.
{multiTx, multiTxEncoded, 210, io.ErrShortWrite, io.EOF},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Serialize the transaction.
w := newFixedWriter(test.max)
err := test.in.Serialize(w)
if !errors.Is(err, test.writeErr) {
t.Errorf("Serialize #%d wrong error got: %v, want: %v",
i, err, test.writeErr)
continue
}
// Deserialize the transaction.
var tx MsgTx
r := newFixedReader(test.max, test.buf)
err = tx.Deserialize(r)
if !errors.Is(err, test.readErr) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, test.readErr)
continue
}
}
registryTx := NewSubnetworkMsgTx(1, nil, nil, subnetworkid.SubnetworkIDRegistry, 1, nil)
w := bytes.NewBuffer(make([]byte, 0, registryTx.SerializeSize()))
err := registryTx.Serialize(w)
str := "Transactions from built-in should have 0 gas"
expectedErr := messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
nativeTx := NewSubnetworkMsgTx(1, nil, nil, subnetworkid.SubnetworkIDNative, 1, nil)
w = bytes.NewBuffer(make([]byte, 0, registryTx.SerializeSize()))
err = nativeTx.Serialize(w)
str = "Transactions from native subnetwork should have 0 gas"
expectedErr = messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
nativeTx.Gas = 0
nativeTx.Payload = []byte{1, 2, 3}
nativeTx.PayloadHash = daghash.DoubleHashP(nativeTx.Payload)
w = bytes.NewBuffer(make([]byte, 0, registryTx.SerializeSize()))
err = nativeTx.Serialize(w)
str = "Transactions from native subnetwork should have <nil> payload"
expectedErr = messageError("MsgTx.KaspaEncode", str)
if err == nil || err.Error() != expectedErr.Error() {
t.Errorf("TestTxSerializeErrors: expected error %v but got %v", expectedErr, err)
}
}
// TestTxOverflowErrors performs tests to ensure that transactions which are
// intentionally crafted to use large values for the variable number of inputs
// and outputs are handled properly during deserialization. This could otherwise
// potentially be used as an attack vector.
func TestTxOverflowErrors(t *testing.T) {
pver := ProtocolVersion
txVer := uint32(1)
tests := []struct {
buf []byte // Encoded value
pver uint32 // Protocol version for appmessage encoding
version uint32 // Transaction version
err error // Expected error
}{
// Transaction that claims to have ~uint64(0) inputs.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for number of input transactions
}, pver, txVer, &MessageError{},
},
// Transaction that claims to have ~uint64(0) outputs.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0x00, // Varint for number of input transactions
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for number of output transactions
}, pver, txVer, &MessageError{},
},
// Transaction that has an input with a signature script that
// claims to have ~uint64(0) length.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for length of signature script
}, pver, txVer, &MessageError{},
},
// Transaction that has an output with a public key script
// that claims to have ~uint64(0) length.
{
[]byte{
0x00, 0x00, 0x00, 0x01, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x00, // Varint for length of signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x01, // Varint for number of output transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Transaction amount
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, // Varint for length of public key script
}, pver, txVer, &MessageError{},
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
// Decode from appmessage format.
var msg MsgTx
r := bytes.NewReader(test.buf)
err := msg.KaspaDecode(r, test.pver)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("KaspaDecode #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
// Decode from appmessage format.
r = bytes.NewReader(test.buf)
err = msg.Deserialize(r)
if reflect.TypeOf(err) != reflect.TypeOf(test.err) {
t.Errorf("Deserialize #%d wrong error got: %v, want: %v",
i, err, reflect.TypeOf(test.err))
continue
}
}
}
// TestTxSerializeSize performs tests to ensure the serialize size for
// various transactions is accurate.
func TestTxSerializeSize(t *testing.T) {
// Empty tx message.
noTx := NewNativeMsgTx(1, nil, nil)
tests := []struct {
in *MsgTx // Tx to encode
size int // Expected serialized size
}{
// No inputs or outputs.
{noTx, 34},
// Transaction with an input and an output.
{multiTx, 238},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
serializedSize := test.in.SerializeSize()
if serializedSize != test.size {
t.Errorf("MsgTx.SerializeSize: #%d got: %d, want: %d", i,
serializedSize, test.size)
continue
}
}
}
func TestIsSubnetworkCompatible(t *testing.T) {
testTx := NewSubnetworkMsgTx(1, nil, nil, &subnetworkid.SubnetworkID{123}, 0, []byte{})
tests := []struct {
name string
subnetworkID *subnetworkid.SubnetworkID
expectedResult bool
}{
{
name: "Native subnetwork",
subnetworkID: subnetworkid.SubnetworkIDNative,
expectedResult: true,
},
{
name: "same subnetwork as test tx",
subnetworkID: &subnetworkid.SubnetworkID{123},
expectedResult: true,
},
{
name: "other subnetwork",
subnetworkID: &subnetworkid.SubnetworkID{234},
expectedResult: false,
},
}
for _, test := range tests {
result := testTx.IsSubnetworkCompatible(test.subnetworkID)
if result != test.expectedResult {
t.Errorf("IsSubnetworkCompatible got unexpected result in test '%s': "+
"expected: %t, want: %t", test.name, test.expectedResult, result)
}
}
}
func TestScriptFreeList(t *testing.T) {
var list scriptFreeList = make(chan []byte, freeListMaxItems)
expectedCapacity := 512
expectedLengthFirst := 12
expectedLengthSecond := 13
first := list.Borrow(uint64(expectedLengthFirst))
if cap(first) != expectedCapacity {
t.Errorf("MsgTx.TestScriptFreeList: Expected capacity for first %d, but got %d",
expectedCapacity, cap(first))
}
if len(first) != expectedLengthFirst {
t.Errorf("MsgTx.TestScriptFreeList: Expected length for first %d, but got %d",
expectedLengthFirst, len(first))
}
list.Return(first)
// Borrow again, and check that the underlying array is re-used for second
second := list.Borrow(uint64(expectedLengthSecond))
if cap(second) != expectedCapacity {
t.Errorf("MsgTx.TestScriptFreeList: Expected capacity for second %d, but got %d",
expectedCapacity, cap(second))
}
if len(second) != expectedLengthSecond {
t.Errorf("MsgTx.TestScriptFreeList: Expected length for second %d, but got %d",
expectedLengthSecond, len(second))
}
firstArrayAddress := underlyingArrayAddress(first)
secondArrayAddress := underlyingArrayAddress(second)
if firstArrayAddress != secondArrayAddress {
t.Errorf("First underlying array is at address %d and second at address %d, "+
"which means memory was not re-used", firstArrayAddress, secondArrayAddress)
}
list.Return(second)
// test for buffers bigger than freeListMaxScriptSize
expectedCapacityBig := freeListMaxScriptSize + 1
expectedLengthBig := expectedCapacityBig
big := list.Borrow(uint64(expectedCapacityBig))
if cap(big) != expectedCapacityBig {
t.Errorf("MsgTx.TestScriptFreeList: Expected capacity for second %d, but got %d",
expectedCapacityBig, cap(big))
}
if len(big) != expectedLengthBig {
t.Errorf("MsgTx.TestScriptFreeList: Expected length for second %d, but got %d",
expectedLengthBig, len(big))
}
list.Return(big)
// test that there's no crash when the channel is full because too many buffers were borrowed
buffers := make([][]byte, freeListMaxItems+1)
for i := 0; i < freeListMaxItems+1; i++ {
buffers[i] = list.Borrow(1)
}
for i := 0; i < freeListMaxItems+1; i++ {
list.Return(buffers[i])
}
}
func underlyingArrayAddress(buf []byte) uint64 {
return uint64((*reflect.SliceHeader)(unsafe.Pointer(&buf)).Data)
}
// multiTx is a MsgTx with an input and output and used in various tests.
var multiTxIns = []*TxIn{
{
PreviousOutpoint: Outpoint{
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62,
},
Sequence: math.MaxUint64,
},
}
var multiTxOuts = []*TxOut{
{
Value: 0x12a05f200,
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
{
Value: 0x5f5e100,
ScriptPubKey: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
}
var multiTx = NewNativeMsgTx(1, multiTxIns, multiTxOuts)
// multiTxEncoded is the appmessage encoded bytes for multiTx using protocol version
// 60002 and is used in the various tests.
var multiTxEncoded = []byte{
0x01, 0x00, 0x00, 0x00, // Version
0x01, // Varint for number of input transactions
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
0xff, 0xff, 0xff, 0xff, // Previous output index
0x07, // Varint for length of signature script
0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence
0x02, // Varint for number of output transactions
0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0xe1, 0xf5, 0x05, 0x00, 0x00, 0x00, 0x00, // Transaction amount
0x43, // Varint for length of scriptPubKey
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, // Sub Network ID
}
// multiTxScriptPubKeyLocs is the location information for the public key scripts
// located in multiTx.
var multiTxScriptPubKeyLocs = []int{67, 143}
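// A minimal sanity-check sketch (not part of the original test file) showing
// how these offsets line up with multiTxEncoded above: each location points at
// the first byte of a scriptPubKey, immediately after its 0x43 (67-byte)
// length varint. It assumes the "bytes" package is imported.
func checkMultiTxScriptPubKeyLocs() bool {
	const scriptPubKeyLen = 0x43 // Length varint preceding each scriptPubKey above.
	for i, loc := range multiTxScriptPubKeyLocs {
		encoded := multiTxEncoded[loc : loc+scriptPubKeyLen]
		if !bytes.Equal(encoded, multiTxOuts[i].ScriptPubKey) {
			return false
		}
	}
	return true
}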

View File

@@ -6,13 +6,14 @@ package appmessage
import (
"fmt"
"strings"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/version"
"strings"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
)
// MaxUserAgentLen is the maximum allowed length for the user agent field in a
@@ -53,11 +54,14 @@ type MsgVersion struct {
// on the appmessage. This has a max length of MaxUserAgentLen.
UserAgent string
// The selected tip hash of the generator of the version message.
SelectedTipHash *daghash.Hash
// Don't announce transactions to peer.
DisableRelayTx bool
// The subnetwork of the generator of the version message. Should be nil in full nodes
SubnetworkID *externalapi.DomainSubnetworkID
SubnetworkID *subnetworkid.SubnetworkID
}
// HasService returns whether the specified service is supported by the peer
@@ -82,7 +86,7 @@ func (msg *MsgVersion) Command() MessageCommand {
// Message interface using the passed parameters and defaults for the remaining
// fields.
func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
subnetworkID *externalapi.DomainSubnetworkID) *MsgVersion {
selectedTipHash *daghash.Hash, subnetworkID *subnetworkid.SubnetworkID) *MsgVersion {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
@@ -94,6 +98,7 @@ func NewMsgVersion(addr *NetAddress, id *id.ID, network string,
Address: addr,
ID: id,
UserAgent: DefaultUserAgent,
SelectedTipHash: selectedTipHash,
DisableRelayTx: false,
SubnetworkID: subnetworkID,
}

View File

@@ -5,12 +5,12 @@
package appmessage
import (
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util/daghash"
"net"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
)
// TestVersion tests the MsgVersion API.
@@ -18,6 +18,7 @@ func TestVersion(t *testing.T) {
pver := ProtocolVersion
// Create version message data.
selectedTipHash := &daghash.Hash{12, 34}
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
generatedID, err := id.GenerateID()
@@ -26,7 +27,7 @@ func TestVersion(t *testing.T) {
}
// Ensure we get the correct data back out.
msg := NewMsgVersion(me, generatedID, "mainnet", nil)
msg := NewMsgVersion(me, generatedID, "mainnet", selectedTipHash, nil)
if msg.ProtocolVersion != pver {
t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v",
msg.ProtocolVersion, pver)
@@ -43,6 +44,10 @@ func TestVersion(t *testing.T) {
t.Errorf("NewMsgVersion: wrong user agent - got %v, want %v",
msg.UserAgent, DefaultUserAgent)
}
if !msg.SelectedTipHash.IsEqual(selectedTipHash) {
t.Errorf("NewMsgVersion: wrong selected tip hash - got %s, want %s",
msg.SelectedTipHash, selectedTipHash)
}
if msg.DisableRelayTx {
t.Errorf("NewMsgVersion: disable relay tx is not false by "+
"default - got %v, want %v", msg.DisableRelayTx, false)

View File

@@ -1,16 +0,0 @@
package appmessage
// MsgRequestPruningPointHashMessage represents a kaspa RequestPruningPointHashMessage message
type MsgRequestPruningPointHashMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgRequestPruningPointHashMessage) Command() MessageCommand {
return CmdRequestPruningPointHash
}
// NewMsgRequestPruningPointHashMessage returns a new kaspa RequestPruningPointHash message
func NewMsgRequestPruningPointHashMessage() *MsgRequestPruningPointHashMessage {
return &MsgRequestPruningPointHashMessage{}
}

View File

@@ -1,16 +0,0 @@
package appmessage
// MsgUnexpectedPruningPoint represents a kaspa UnexpectedPruningPoint message
type MsgUnexpectedPruningPoint struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *MsgUnexpectedPruningPoint) Command() MessageCommand {
return CmdUnexpectedPruningPoint
}
// NewMsgUnexpectedPruningPoint returns a new kaspa UnexpectedPruningPoint message
func NewMsgUnexpectedPruningPoint() *MsgUnexpectedPruningPoint {
return &MsgUnexpectedPruningPoint{}
}

View File

@@ -1,39 +0,0 @@
package appmessage
// BanRequestMessage is an appmessage corresponding to
// its respective RPC message
type BanRequestMessage struct {
baseMessage
IP string
}
// Command returns the protocol command string for the message
func (msg *BanRequestMessage) Command() MessageCommand {
return CmdBanRequestMessage
}
// NewBanRequestMessage returns an instance of the message
func NewBanRequestMessage(ip string) *BanRequestMessage {
return &BanRequestMessage{
IP: ip,
}
}
// BanResponseMessage is an appmessage corresponding to
// its respective RPC message
type BanResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *BanResponseMessage) Command() MessageCommand {
return CmdBanResponseMessage
}
// NewBanResponseMessage returns a instance of the message
func NewBanResponseMessage() *BanResponseMessage {
return &BanResponseMessage{}
}

View File

@@ -5,6 +5,9 @@ package appmessage
type GetBlockRequestMessage struct {
baseMessage
Hash string
SubnetworkID string
IncludeBlockHex bool
IncludeBlockVerboseData bool
IncludeTransactionVerboseData bool
}
@@ -14,9 +17,13 @@ func (msg *GetBlockRequestMessage) Command() MessageCommand {
}
// NewGetBlockRequestMessage returns an instance of the message
func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool) *GetBlockRequestMessage {
func NewGetBlockRequestMessage(hash string, subnetworkID string, includeBlockHex bool,
includeBlockVerboseData bool, includeTransactionVerboseData bool) *GetBlockRequestMessage {
return &GetBlockRequestMessage{
Hash: hash,
SubnetworkID: subnetworkID,
IncludeBlockHex: includeBlockHex,
IncludeBlockVerboseData: includeBlockVerboseData,
IncludeTransactionVerboseData: includeTransactionVerboseData,
}
}
@@ -25,6 +32,7 @@ func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool)
// its respective RPC message
type GetBlockResponseMessage struct {
baseMessage
BlockHex string
BlockVerboseData *BlockVerboseData
Error *RPCError
@@ -43,7 +51,11 @@ func NewGetBlockResponseMessage() *GetBlockResponseMessage {
// BlockVerboseData holds verbose data about a block
type BlockVerboseData struct {
Hash string
Version uint16
Confirmations uint64
Size int32
BlueScore uint64
IsChainBlock bool
Version int32
VersionHex string
HashMerkleRoot string
AcceptedIDMerkleRoot string
@@ -55,18 +67,18 @@ type BlockVerboseData struct {
Bits string
Difficulty float64
ParentHashes []string
ChildrenHashes []string
SelectedParentHash string
BlueScore uint64
IsHeaderOnly bool
ChildHashes []string
AcceptedBlockHashes []string
}
// TransactionVerboseData holds verbose data about a transaction
type TransactionVerboseData struct {
Hex string
TxID string
Hash string
Size uint64
Version uint16
Size int32
Version int32
LockTime uint64
SubnetworkID string
Gas uint64
@@ -75,6 +87,8 @@ type TransactionVerboseData struct {
TransactionVerboseInputs []*TransactionVerboseInput
TransactionVerboseOutputs []*TransactionVerboseOutput
BlockHash string
AcceptedBy string
IsInMempool bool
Time uint64
BlockTime uint64
}
@@ -102,8 +116,8 @@ type TransactionVerboseOutput struct {
// ScriptPubKeyResult holds data about a script public key
type ScriptPubKeyResult struct {
Asm string
Hex string
Type string
Address string
Version uint16
}

View File

@@ -1,7 +1,5 @@
package appmessage
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// GetBlockCountRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetBlockCountRequestMessage struct {
@@ -22,8 +20,7 @@ func NewGetBlockCountRequestMessage() *GetBlockCountRequestMessage {
// its respective RPC message
type GetBlockCountResponseMessage struct {
baseMessage
BlockCount uint64
HeaderCount uint64
BlockCount uint64
Error *RPCError
}
@@ -34,9 +31,8 @@ func (msg *GetBlockCountResponseMessage) Command() MessageCommand {
}
// NewGetBlockCountResponseMessage returns an instance of the message
func NewGetBlockCountResponseMessage(syncInfo *externalapi.SyncInfo) *GetBlockCountResponseMessage {
func NewGetBlockCountResponseMessage(blockCount uint64) *GetBlockCountResponseMessage {
return &GetBlockCountResponseMessage{
BlockCount: syncInfo.BlockCount,
HeaderCount: syncInfo.HeaderCount,
BlockCount: blockCount,
}
}

View File

@@ -22,12 +22,10 @@ type GetBlockDAGInfoResponseMessage struct {
baseMessage
NetworkName string
BlockCount uint64
HeaderCount uint64
TipHashes []string
VirtualParentHashes []string
Difficulty float64
PastMedianTime int64
PruningPointHash string
Error *RPCError
}

View File

@@ -5,6 +5,7 @@ package appmessage
type GetBlockTemplateRequestMessage struct {
baseMessage
PayAddress string
LongPollID string
}
// Command returns the protocol command string for the message
@@ -13,9 +14,10 @@ func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand {
}
// NewGetBlockTemplateRequestMessage returns an instance of the message
func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateRequestMessage {
func NewGetBlockTemplateRequestMessage(payAddress string, longPollID string) *GetBlockTemplateRequestMessage {
return &GetBlockTemplateRequestMessage{
PayAddress: payAddress,
LongPollID: longPollID,
}
}
@@ -23,8 +25,23 @@ func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateReque
// its respective RPC message
type GetBlockTemplateResponseMessage struct {
baseMessage
MsgBlock *MsgBlock
IsSynced bool
Bits string
CurrentTime int64
ParentHashes []string
MassLimit int
Transactions []GetBlockTemplateTransactionMessage
HashMerkleRoot string
AcceptedIDMerkleRoot string
UTXOCommitment string
Version int32
LongPollID string
TargetDifficulty string
MinTime int64
MaxTime int64
MutableFields []string
NonceRange string
IsSynced bool
IsConnected bool
Error *RPCError
}
@@ -35,9 +52,27 @@ func (msg *GetBlockTemplateResponseMessage) Command() MessageCommand {
}
// NewGetBlockTemplateResponseMessage returns an instance of the message
func NewGetBlockTemplateResponseMessage(msgBlock *MsgBlock, isSynced bool) *GetBlockTemplateResponseMessage {
return &GetBlockTemplateResponseMessage{
MsgBlock: msgBlock,
IsSynced: isSynced,
}
func NewGetBlockTemplateResponseMessage() *GetBlockTemplateResponseMessage {
return &GetBlockTemplateResponseMessage{}
}
// GetBlockTemplateTransactionMessage is an appmessage corresponding to
// its respective RPC message
type GetBlockTemplateTransactionMessage struct {
baseMessage
Data string
ID string
Depends []int64
Mass uint64
Fee uint64
}
// Command returns the protocol command string for the message
func (msg *GetBlockTemplateTransactionMessage) Command() MessageCommand {
return CmdGetBlockTemplateTransactionMessage
}
// NewGetBlockTemplateTransactionMessage returns an instance of the message
func NewGetBlockTemplateTransactionMessage() *GetBlockTemplateTransactionMessage {
return &GetBlockTemplateTransactionMessage{}
}

View File

@@ -4,9 +4,9 @@ package appmessage
// its respective RPC message
type GetBlocksRequestMessage struct {
baseMessage
LowHash string
IncludeBlockVerboseData bool
IncludeTransactionVerboseData bool
LowHash string
IncludeBlockHexes bool
IncludeBlockVerboseData bool
}
// Command returns the protocol command string for the message
@@ -15,12 +15,11 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
}
// NewGetBlocksRequestMessage returns an instance of the message
func NewGetBlocksRequestMessage(lowHash string, includeBlockVerboseData bool,
includeTransactionVerboseData bool) *GetBlocksRequestMessage {
func NewGetBlocksRequestMessage(lowHash string, includeBlockHexes bool, includeBlockVerboseData bool) *GetBlocksRequestMessage {
return &GetBlocksRequestMessage{
LowHash: lowHash,
IncludeBlockVerboseData: includeBlockVerboseData,
IncludeTransactionVerboseData: includeTransactionVerboseData,
LowHash: lowHash,
IncludeBlockHexes: includeBlockHexes,
IncludeBlockVerboseData: includeBlockVerboseData,
}
}
@@ -29,6 +28,7 @@ func NewGetBlocksRequestMessage(lowHash string, includeBlockVerboseData bool,
type GetBlocksResponseMessage struct {
baseMessage
BlockHashes []string
BlockHexes []string
BlockVerboseData []*BlockVerboseData
Error *RPCError
@@ -45,6 +45,7 @@ func NewGetBlocksResponseMessage(blockHashes []string, blockHexes []string,
return &GetBlocksResponseMessage{
BlockHashes: blockHashes,
BlockHexes: blockHexes,
BlockVerboseData: blockVerboseData,
}
}

View File

@@ -0,0 +1,49 @@
package appmessage
// GetChainFromBlockRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetChainFromBlockRequestMessage struct {
baseMessage
StartHash string
IncludeBlockVerboseData bool
}
// Command returns the protocol command string for the message
func (msg *GetChainFromBlockRequestMessage) Command() MessageCommand {
return CmdGetChainFromBlockRequestMessage
}
// NewGetChainFromBlockRequestMessage returns an instance of the message
func NewGetChainFromBlockRequestMessage(startHash string, includeBlockVerboseData bool) *GetChainFromBlockRequestMessage {
return &GetChainFromBlockRequestMessage{
StartHash: startHash,
IncludeBlockVerboseData: includeBlockVerboseData,
}
}
// GetChainFromBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
BlockVerboseData []*BlockVerboseData
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetChainFromBlockResponseMessage) Command() MessageCommand {
return CmdGetChainFromBlockResponseMessage
}
// NewGetChainFromBlockResponseMessage returns an instance of the message
func NewGetChainFromBlockResponseMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock, blockVerboseData []*BlockVerboseData) *GetChainFromBlockResponseMessage {
return &GetChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
BlockVerboseData: blockVerboseData,
}
}

View File

@@ -41,10 +41,11 @@ type GetConnectedPeerInfoMessage struct {
ID string
Address string
LastPingDuration int64
SelectedTipHash string
IsSyncNode bool
IsOutbound bool
TimeOffset int64
UserAgent string
AdvertisedProtocolVersion uint32
TimeConnected int64
IsIBDPeer bool
}

View File

@@ -1,38 +0,0 @@
package appmessage
// GetInfoRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetInfoRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetInfoRequestMessage) Command() MessageCommand {
return CmdGetInfoRequestMessage
}
// NewGeInfoRequestMessage returns an instance of the message
func NewGeInfoRequestMessage() *GetInfoRequestMessage {
return &GetInfoRequestMessage{}
}
// GetInfoResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetInfoResponseMessage struct {
baseMessage
P2PID string
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetInfoResponseMessage) Command() MessageCommand {
return CmdGetInfoResponseMessage
}
// NewGetInfoResponseMessage returns an instance of the message
func NewGetInfoResponseMessage(p2pID string) *GetInfoResponseMessage {
return &GetInfoResponseMessage{
P2PID: p2pID,
}
}

View File

@@ -1,41 +0,0 @@
package appmessage
// GetUTXOsByAddressesRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetUTXOsByAddressesRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *GetUTXOsByAddressesRequestMessage) Command() MessageCommand {
return CmdGetUTXOsByAddressesRequestMessage
}
// NewGetUTXOsByAddressesRequestMessage returns an instance of the message
func NewGetUTXOsByAddressesRequestMessage(addresses []string) *GetUTXOsByAddressesRequestMessage {
return &GetUTXOsByAddressesRequestMessage{
Addresses: addresses,
}
}
// GetUTXOsByAddressesResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetUTXOsByAddressesResponseMessage struct {
baseMessage
Entries []*UTXOsByAddressesEntry
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetUTXOsByAddressesResponseMessage) Command() MessageCommand {
return CmdGetUTXOsByAddressesResponseMessage
}
// NewGetUTXOsByAddressesResponseMessage returns an instance of the message
func NewGetUTXOsByAddressesResponseMessage(entries []*UTXOsByAddressesEntry) *GetUTXOsByAddressesResponseMessage {
return &GetUTXOsByAddressesResponseMessage{
Entries: entries,
}
}

View File

@@ -1,38 +0,0 @@
package appmessage
// GetVirtualSelectedParentBlueScoreRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentBlueScoreRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentBlueScoreRequestMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentBlueScoreRequestMessage
}
// NewGetVirtualSelectedParentBlueScoreRequestMessage returns an instance of the message
func NewGetVirtualSelectedParentBlueScoreRequestMessage() *GetVirtualSelectedParentBlueScoreRequestMessage {
return &GetVirtualSelectedParentBlueScoreRequestMessage{}
}
// GetVirtualSelectedParentBlueScoreResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentBlueScoreResponseMessage struct {
baseMessage
BlueScore uint64
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentBlueScoreResponseMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentBlueScoreResponseMessage
}
// NewGetVirtualSelectedParentBlueScoreResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentBlueScoreResponseMessage(blueScore uint64) *GetVirtualSelectedParentBlueScoreResponseMessage {
return &GetVirtualSelectedParentBlueScoreResponseMessage{
BlueScore: blueScore,
}
}

View File

@@ -1,45 +0,0 @@
package appmessage
// GetVirtualSelectedParentChainFromBlockRequestMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentChainFromBlockRequestMessage struct {
baseMessage
StartHash string
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentChainFromBlockRequestMessage
}
// NewGetVirtualSelectedParentChainFromBlockRequestMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash string) *GetVirtualSelectedParentChainFromBlockRequestMessage {
return &GetVirtualSelectedParentChainFromBlockRequestMessage{
StartHash: startHash,
}
}
// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type GetVirtualSelectedParentChainFromBlockResponseMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() MessageCommand {
return CmdGetVirtualSelectedParentChainFromBlockResponseMessage
}
// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns an instance of the message
func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *GetVirtualSelectedParentChainFromBlockResponseMessage {
return &GetVirtualSelectedParentChainFromBlockResponseMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -37,8 +37,7 @@ func NewNotifyBlockAddedResponseMessage() *NotifyBlockAddedResponseMessage {
// its respective RPC message
type BlockAddedNotificationMessage struct {
baseMessage
Block *MsgBlock
BlockVerboseData *BlockVerboseData
Block *MsgBlock
}
// Command returns the protocol command string for the message
@@ -47,9 +46,8 @@ func (msg *BlockAddedNotificationMessage) Command() MessageCommand {
}
// NewBlockAddedNotificationMessage returns an instance of the message
func NewBlockAddedNotificationMessage(block *MsgBlock, blockVerboseData *BlockVerboseData) *BlockAddedNotificationMessage {
func NewBlockAddedNotificationMessage(block *MsgBlock) *BlockAddedNotificationMessage {
return &BlockAddedNotificationMessage{
Block: block,
BlockVerboseData: blockVerboseData,
Block: block,
}
}

View File

@@ -0,0 +1,69 @@
package appmessage
// NotifyChainChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyChainChangedRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyChainChangedRequestMessage) Command() MessageCommand {
return CmdNotifyChainChangedRequestMessage
}
// NewNotifyChainChangedRequestMessage returns an instance of the message
func NewNotifyChainChangedRequestMessage() *NotifyChainChangedRequestMessage {
return &NotifyChainChangedRequestMessage{}
}
// NotifyChainChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyChainChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyChainChangedResponseMessage) Command() MessageCommand {
return CmdNotifyChainChangedResponseMessage
}
// NewNotifyChainChangedResponseMessage returns an instance of the message
func NewNotifyChainChangedResponseMessage() *NotifyChainChangedResponseMessage {
return &NotifyChainChangedResponseMessage{}
}
// ChainChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type ChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
}
// ChainBlock represents a DAG chain-block
type ChainBlock struct {
Hash string
AcceptedBlocks []*AcceptedBlock
}
// AcceptedBlock represents a block accepted into the DAG
type AcceptedBlock struct {
Hash string
AcceptedTxIDs []string
}
// Command returns the protocol command string for the message
func (msg *ChainChangedNotificationMessage) Command() MessageCommand {
return CmdChainChangedNotificationMessage
}
// NewChainChangedNotificationMessage returns an instance of the message
func NewChainChangedNotificationMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *ChainChangedNotificationMessage {
return &ChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -1,83 +0,0 @@
package appmessage
// NotifyPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyPruningPointUTXOSetOverrideRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
}
// NewNotifyPruningPointUTXOSetOverrideRequestMessage returns an instance of the message
func NewNotifyPruningPointUTXOSetOverrideRequestMessage() *NotifyPruningPointUTXOSetOverrideRequestMessage {
return &NotifyPruningPointUTXOSetOverrideRequestMessage{}
}
// NotifyPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyPruningPointUTXOSetOverrideResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
}
// NewNotifyPruningPointUTXOSetOverrideResponseMessage returns an instance of the message
func NewNotifyPruningPointUTXOSetOverrideResponseMessage() *NotifyPruningPointUTXOSetOverrideResponseMessage {
return &NotifyPruningPointUTXOSetOverrideResponseMessage{}
}
// PruningPointUTXOSetOverrideNotificationMessage is an appmessage corresponding to
// its respective RPC message
type PruningPointUTXOSetOverrideNotificationMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *PruningPointUTXOSetOverrideNotificationMessage) Command() MessageCommand {
return CmdPruningPointUTXOSetOverrideNotificationMessage
}
// NewPruningPointUTXOSetOverrideNotificationMessage returns an instance of the message
func NewPruningPointUTXOSetOverrideNotificationMessage() *PruningPointUTXOSetOverrideNotificationMessage {
return &PruningPointUTXOSetOverrideNotificationMessage{}
}
// StopNotifyingPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingPruningPointUTXOSetOverrideRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideRequestMessage
}
// NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage returns an instance of the message
func NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage() *StopNotifyingPruningPointUTXOSetOverrideRequestMessage {
return &StopNotifyingPruningPointUTXOSetOverrideRequestMessage{}
}
// StopNotifyingPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingPruningPointUTXOSetOverrideResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand {
return CmdNotifyPruningPointUTXOSetOverrideResponseMessage
}
// NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage returns an instance of the message
func NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage() *StopNotifyingPruningPointUTXOSetOverrideResponseMessage {
return &StopNotifyingPruningPointUTXOSetOverrideResponseMessage{}
}

View File

@@ -1,62 +0,0 @@
package appmessage
// NotifyUTXOsChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyUTXOsChangedRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *NotifyUTXOsChangedRequestMessage) Command() MessageCommand {
return CmdNotifyUTXOsChangedRequestMessage
}
// NewNotifyUTXOsChangedRequestMessage returns an instance of the message
func NewNotifyUTXOsChangedRequestMessage(addresses []string) *NotifyUTXOsChangedRequestMessage {
return &NotifyUTXOsChangedRequestMessage{
Addresses: addresses,
}
}
// NotifyUTXOsChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyUTXOsChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyUTXOsChangedResponseMessage) Command() MessageCommand {
return CmdNotifyUTXOsChangedResponseMessage
}
// NewNotifyUTXOsChangedResponseMessage returns an instance of the message
func NewNotifyUTXOsChangedResponseMessage() *NotifyUTXOsChangedResponseMessage {
return &NotifyUTXOsChangedResponseMessage{}
}
// UTXOsChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type UTXOsChangedNotificationMessage struct {
baseMessage
Added []*UTXOsByAddressesEntry
Removed []*UTXOsByAddressesEntry
}
// UTXOsByAddressesEntry represents a UTXO of some address
type UTXOsByAddressesEntry struct {
Address string
Outpoint *RPCOutpoint
UTXOEntry *RPCUTXOEntry
}
// Command returns the protocol command string for the message
func (msg *UTXOsChangedNotificationMessage) Command() MessageCommand {
return CmdUTXOsChangedNotificationMessage
}
// NewUTXOsChangedNotificationMessage returns an instance of the message
func NewUTXOsChangedNotificationMessage() *UTXOsChangedNotificationMessage {
return &UTXOsChangedNotificationMessage{}
}

View File

@@ -1,55 +0,0 @@
package appmessage
// NotifyVirtualSelectedParentBlueScoreChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentBlueScoreChangedRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage
}
// NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage() *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage {
return &NotifyVirtualSelectedParentBlueScoreChangedRequestMessage{}
}
// NotifyVirtualSelectedParentBlueScoreChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentBlueScoreChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage
}
// NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage returns an instance of the message
func NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage() *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage {
return &NotifyVirtualSelectedParentBlueScoreChangedResponseMessage{}
}
// VirtualSelectedParentBlueScoreChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type VirtualSelectedParentBlueScoreChangedNotificationMessage struct {
baseMessage
VirtualSelectedParentBlueScore uint64
}
// Command returns the protocol command string for the message
func (msg *VirtualSelectedParentBlueScoreChangedNotificationMessage) Command() MessageCommand {
return CmdVirtualSelectedParentBlueScoreChangedNotificationMessage
}
// NewVirtualSelectedParentBlueScoreChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentBlueScoreChangedNotificationMessage(
virtualSelectedParentBlueScore uint64) *VirtualSelectedParentBlueScoreChangedNotificationMessage {
return &VirtualSelectedParentBlueScoreChangedNotificationMessage{
VirtualSelectedParentBlueScore: virtualSelectedParentBlueScore,
}
}

View File

@@ -1,69 +0,0 @@
package appmessage
// NotifyVirtualSelectedParentChainChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentChainChangedRequestMessage struct {
baseMessage
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentChainChangedRequestMessage
}
// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedRequestMessage() *NotifyVirtualSelectedParentChainChangedRequestMessage {
return &NotifyVirtualSelectedParentChainChangedRequestMessage{}
}
// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type NotifyVirtualSelectedParentChainChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *NotifyVirtualSelectedParentChainChangedResponseMessage) Command() MessageCommand {
return CmdNotifyVirtualSelectedParentChainChangedResponseMessage
}
// NewNotifyVirtualSelectedParentChainChangedResponseMessage returns an instance of the message
func NewNotifyVirtualSelectedParentChainChangedResponseMessage() *NotifyVirtualSelectedParentChainChangedResponseMessage {
return &NotifyVirtualSelectedParentChainChangedResponseMessage{}
}
// VirtualSelectedParentChainChangedNotificationMessage is an appmessage corresponding to
// its respective RPC message
type VirtualSelectedParentChainChangedNotificationMessage struct {
baseMessage
RemovedChainBlockHashes []string
AddedChainBlocks []*ChainBlock
}
// ChainBlock represents a DAG chain-block
type ChainBlock struct {
Hash string
AcceptedBlocks []*AcceptedBlock
}
// AcceptedBlock represents a block accepted into the DAG
type AcceptedBlock struct {
Hash string
AcceptedTransactionIDs []string
}
// Command returns the protocol command string for the message
func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() MessageCommand {
return CmdVirtualSelectedParentChainChangedNotificationMessage
}
// NewVirtualSelectedParentChainChangedNotificationMessage returns an instance of the message
func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes []string,
addedChainBlocks []*ChainBlock) *VirtualSelectedParentChainChangedNotificationMessage {
return &VirtualSelectedParentChainChangedNotificationMessage{
RemovedChainBlockHashes: removedChainBlockHashes,
AddedChainBlocks: addedChainBlocks,
}
}

View File

@@ -1,37 +0,0 @@
package appmessage
// StopNotifyingUTXOsChangedRequestMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingUTXOsChangedRequestMessage struct {
baseMessage
Addresses []string
}
// Command returns the protocol command string for the message
func (msg *StopNotifyingUTXOsChangedRequestMessage) Command() MessageCommand {
return CmdStopNotifyingUTXOsChangedRequestMessage
}
// NewStopNotifyingUTXOsChangedRequestMessage returns an instance of the message
func NewStopNotifyingUTXOsChangedRequestMessage(addresses []string) *StopNotifyingUTXOsChangedRequestMessage {
return &StopNotifyingUTXOsChangedRequestMessage{
Addresses: addresses,
}
}
// StopNotifyingUTXOsChangedResponseMessage is an appmessage corresponding to
// its respective RPC message
type StopNotifyingUTXOsChangedResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *StopNotifyingUTXOsChangedResponseMessage) Command() MessageCommand {
return CmdStopNotifyingUTXOsChangedResponseMessage
}
// NewStopNotifyingUTXOsChangedResponseMessage returns an instance of the message
func NewStopNotifyingUTXOsChangedResponseMessage() *StopNotifyingUTXOsChangedResponseMessage {
return &StopNotifyingUTXOsChangedResponseMessage{}
}

View File

@@ -4,7 +4,7 @@ package appmessage
// its respective RPC message
type SubmitBlockRequestMessage struct {
baseMessage
Block *MsgBlock
BlockHex string
}
// Command returns the protocol command string for the message
@@ -13,39 +13,17 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
}
// NewSubmitBlockRequestMessage returns an instance of the message
func NewSubmitBlockRequestMessage(block *MsgBlock) *SubmitBlockRequestMessage {
func NewSubmitBlockRequestMessage(blockHex string) *SubmitBlockRequestMessage {
return &SubmitBlockRequestMessage{
Block: block,
BlockHex: blockHex,
}
}
// RejectReason describes the reason why a block sent by SubmitBlock was rejected
type RejectReason byte
// RejectReason constants
// Not using iota, since in the .proto file those are hardcoded
const (
RejectReasonNone RejectReason = 0
RejectReasonBlockInvalid RejectReason = 1
RejectReasonIsInIBD RejectReason = 2
)
var rejectReasonToString = map[RejectReason]string{
RejectReasonNone: "None",
RejectReasonBlockInvalid: "Block is invalid",
RejectReasonIsInIBD: "Node is in IBD",
}
func (rr RejectReason) String() string {
return rejectReasonToString[rr]
}
// SubmitBlockResponseMessage is an appmessage corresponding to
// its respective RPC message
type SubmitBlockResponseMessage struct {
baseMessage
RejectReason RejectReason
Error *RPCError
Error *RPCError
}
// Command returns the protocol command string for the message

View File

@@ -4,7 +4,7 @@ package appmessage
// its respective RPC message
type SubmitTransactionRequestMessage struct {
baseMessage
Transaction *RPCTransaction
TransactionHex string
}
// Command returns the protocol command string for the message
@@ -13,9 +13,9 @@ func (msg *SubmitTransactionRequestMessage) Command() MessageCommand {
}
// NewSubmitTransactionRequestMessage returns an instance of the message
func NewSubmitTransactionRequestMessage(transaction *RPCTransaction) *SubmitTransactionRequestMessage {
func NewSubmitTransactionRequestMessage(transactionHex string) *SubmitTransactionRequestMessage {
return &SubmitTransactionRequestMessage{
Transaction: transaction,
TransactionHex: transactionHex,
}
}
@@ -23,7 +23,7 @@ func NewSubmitTransactionRequestMessage(transaction *RPCTransaction) *SubmitTran
// its respective RPC message
type SubmitTransactionResponseMessage struct {
baseMessage
TransactionID string
TxID string
Error *RPCError
}
@@ -34,58 +34,8 @@ func (msg *SubmitTransactionResponseMessage) Command() MessageCommand {
}
// NewSubmitTransactionResponseMessage returns an instance of the message
func NewSubmitTransactionResponseMessage(transactionID string) *SubmitTransactionResponseMessage {
func NewSubmitTransactionResponseMessage(txID string) *SubmitTransactionResponseMessage {
return &SubmitTransactionResponseMessage{
TransactionID: transactionID,
TxID: txID,
}
}
// RPCTransaction is a kaspad transaction representation meant to be
// used over RPC
type RPCTransaction struct {
Version uint16
Inputs []*RPCTransactionInput
Outputs []*RPCTransactionOutput
LockTime uint64
SubnetworkID string
Gas uint64
PayloadHash string
Payload string
}
// RPCTransactionInput is a kaspad transaction input representation
// meant to be used over RPC
type RPCTransactionInput struct {
PreviousOutpoint *RPCOutpoint
SignatureScript string
Sequence uint64
}
// RPCScriptPublicKey is a kaspad ScriptPublicKey representation
type RPCScriptPublicKey struct {
Version uint16
Script string
}
// RPCTransactionOutput is a kaspad transaction output representation
// meant to be used over RPC
type RPCTransactionOutput struct {
Amount uint64
ScriptPublicKey *RPCScriptPublicKey
}
// RPCOutpoint is a kaspad outpoint representation meant to be used
// over RPC
type RPCOutpoint struct {
TransactionID string
Index uint32
}
// RPCUTXOEntry is a kaspad utxo entry representation meant to be used
// over RPC
type RPCUTXOEntry struct {
Amount uint64
ScriptPublicKey *RPCScriptPublicKey
BlockBlueScore uint64
IsCoinbase bool
}

View File

@@ -1,39 +0,0 @@
package appmessage
// UnbanRequestMessage is an appmessage corresponding to
// its respective RPC message
type UnbanRequestMessage struct {
baseMessage
IP string
}
// Command returns the protocol command string for the message
func (msg *UnbanRequestMessage) Command() MessageCommand {
return CmdUnbanRequestMessage
}
// NewUnbanRequestMessage returns an instance of the message
func NewUnbanRequestMessage(ip string) *UnbanRequestMessage {
return &UnbanRequestMessage{
IP: ip,
}
}
// UnbanResponseMessage is an appmessage corresponding to
// its respective RPC message
type UnbanResponseMessage struct {
baseMessage
Error *RPCError
}
// Command returns the protocol command string for the message
func (msg *UnbanResponseMessage) Command() MessageCommand {
return CmdUnbanResponseMessage
}
// NewUnbanResponseMessage returns an instance of the message
func NewUnbanResponseMessage() *UnbanResponseMessage {
return &UnbanResponseMessage{}
}

View File

@@ -4,12 +4,6 @@ import (
"fmt"
"sync/atomic"
"github.com/kaspanet/kaspad/domain/utxoindex"
infrastructuredatabase "github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
@@ -17,7 +11,13 @@ import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol"
"github.com/kaspanet/kaspad/app/rpc"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/blockdag/indexers"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/domain/mining"
"github.com/kaspanet/kaspad/domain/txscript"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/db/dbaccess"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/dnsseed"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
@@ -77,43 +77,40 @@ func (a *ComponentManager) Stop() {
// NewComponentManager returns a new ComponentManager instance.
// Use Start() to begin all services within this ComponentManager
func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database, interrupt chan<- struct{}) (
*ComponentManager, error) {
func NewComponentManager(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, interrupt chan<- struct{}) (*ComponentManager, error) {
indexManager, acceptanceIndex := setupIndexes(cfg)
domain, err := domain.New(cfg.ActiveNetParams, db, cfg.IsArchivalNode)
sigCache := txscript.NewSigCache(cfg.SigCacheMaxSize)
// Create a new block DAG instance with the appropriate configuration.
dag, err := setupDAG(cfg, databaseContext, sigCache, indexManager)
if err != nil {
return nil, err
}
txMempool := setupMempool(cfg, dag, sigCache)
netAdapter, err := netadapter.NewNetAdapter(cfg)
if err != nil {
return nil, err
}
addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg), db)
addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg))
if err != nil {
return nil, err
}
var utxoIndex *utxoindex.UTXOIndex
if cfg.UTXOIndex {
utxoIndex, err = utxoindex.New(domain.Consensus(), db)
if err != nil {
return nil, err
}
log.Infof("UTXO index started")
}
connectionManager, err := connmanager.New(cfg, netAdapter, addressManager)
if err != nil {
return nil, err
}
protocolManager, err := protocol.NewManager(cfg, domain, netAdapter, addressManager, connectionManager)
protocolManager, err := protocol.NewManager(cfg, dag, netAdapter, addressManager, txMempool, connectionManager)
if err != nil {
return nil, err
}
rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, interrupt)
rpcManager := setupRPC(cfg, txMempool, dag, sigCache, netAdapter, protocolManager, connectionManager, addressManager, acceptanceIndex, interrupt)
return &ComponentManager{
cfg: cfg,
@@ -128,31 +125,60 @@ func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database,
func setupRPC(
cfg *config.Config,
domain domain.Domain,
txMempool *mempool.TxPool,
dag *blockdag.BlockDAG,
sigCache *txscript.SigCache,
netAdapter *netadapter.NetAdapter,
protocolManager *protocol.Manager,
connectionManager *connmanager.ConnectionManager,
addressManager *addressmanager.AddressManager,
utxoIndex *utxoindex.UTXOIndex,
acceptanceIndex *indexers.AcceptanceIndex,
shutDownChan chan<- struct{},
) *rpc.Manager {
rpcManager := rpc.NewManager(
cfg,
domain,
netAdapter,
protocolManager,
connectionManager,
addressManager,
utxoIndex,
shutDownChan,
)
blockTemplateGenerator := mining.NewBlkTmplGenerator(&mining.Policy{BlockMaxMass: cfg.BlockMaxMass}, txMempool, dag, sigCache)
rpcManager := rpc.NewManager(cfg, netAdapter, dag, protocolManager, connectionManager, blockTemplateGenerator, txMempool, addressManager, acceptanceIndex, shutDownChan)
protocolManager.SetOnBlockAddedToDAGHandler(rpcManager.NotifyBlockAddedToDAG)
protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride)
protocolManager.SetOnTransactionAddedToMempoolHandler(rpcManager.NotifyTransactionAddedToMempool)
dag.Subscribe(func(notification *blockdag.Notification) {
err := handleBlockDAGNotifications(notification, acceptanceIndex, rpcManager)
if err != nil {
panic(err)
}
})
return rpcManager
}
func handleBlockDAGNotifications(notification *blockdag.Notification,
acceptanceIndex *indexers.AcceptanceIndex, rpcManager *rpc.Manager) error {
switch notification.Type {
case blockdag.NTChainChanged:
if acceptanceIndex == nil {
return nil
}
chainChangedNotificationData := notification.Data.(*blockdag.ChainChangedNotificationData)
err := rpcManager.NotifyChainChanged(chainChangedNotificationData.RemovedChainBlockHashes,
chainChangedNotificationData.AddedChainBlockHashes)
if err != nil {
return err
}
case blockdag.NTFinalityConflict:
finalityConflictNotificationData := notification.Data.(*blockdag.FinalityConflictNotificationData)
err := rpcManager.NotifyFinalityConflict(finalityConflictNotificationData.ViolatingBlockHash.String())
if err != nil {
return err
}
case blockdag.NTFinalityConflictResolved:
finalityConflictResolvedNotificationData := notification.Data.(*blockdag.FinalityConflictResolvedNotificationData)
err := rpcManager.NotifyFinalityConflictResolved(finalityConflictResolvedNotificationData.FinalityBlockHash.String())
if err != nil {
return err
}
}
return nil
}
func (a *ComponentManager) maybeSeedFromDNS() {
if !a.cfg.DisableDNSSeed {
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
@@ -162,7 +188,9 @@ func (a *ComponentManager) maybeSeedFromDNS() {
// source. So we'll take first returned address as source.
a.addressManager.AddAddresses(addresses...)
})
}
if a.cfg.GRPCSeed != "" {
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, appmessage.SFNodeNetwork, false, nil,
func(addresses []*appmessage.NetAddress) {
a.addressManager.AddAddresses(addresses...)
@@ -170,6 +198,56 @@ func (a *ComponentManager) maybeSeedFromDNS() {
}
}
func setupDAG(cfg *config.Config, databaseContext *dbaccess.DatabaseContext,
sigCache *txscript.SigCache, indexManager blockdag.IndexManager) (*blockdag.BlockDAG, error) {
dag, err := blockdag.New(&blockdag.Config{
DatabaseContext: databaseContext,
DAGParams: cfg.NetParams(),
TimeSource: blockdag.NewTimeSource(),
SigCache: sigCache,
IndexManager: indexManager,
SubnetworkID: cfg.SubnetworkID,
MaxUTXOCacheSize: cfg.MaxUTXOCacheSize,
})
return dag, err
}
func setupIndexes(cfg *config.Config) (blockdag.IndexManager, *indexers.AcceptanceIndex) {
// Create indexes if needed.
var indexes []indexers.Indexer
var acceptanceIndex *indexers.AcceptanceIndex
if cfg.AcceptanceIndex {
log.Info("acceptance index is enabled")
acceptanceIndex = indexers.NewAcceptanceIndex()
indexes = append(indexes, acceptanceIndex)
}
// Create an index manager if any of the optional indexes are enabled.
if len(indexes) == 0 {
return nil, nil
}
indexManager := indexers.NewManager(indexes)
return indexManager, acceptanceIndex
}
func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript.SigCache) *mempool.TxPool {
mempoolConfig := mempool.Config{
Policy: mempool.Policy{
AcceptNonStd: cfg.RelayNonStd,
MaxOrphanTxs: cfg.MaxOrphanTxs,
MaxOrphanTxSize: config.DefaultMaxOrphanTxSize,
MinRelayTxFee: cfg.MinRelayTxFee,
MaxTxVersion: 1,
},
CalcTxSequenceLockFromReferencedUTXOEntries: dag.CalcTxSequenceLockFromReferencedUTXOEntries,
SigCache: sigCache,
DAG: dag,
}
return mempool.New(&mempoolConfig)
}
// P2PNodeID returns the network ID associated with this ComponentManager
func (a *ComponentManager) P2PNodeID() *id.ID {
return a.netAdapter.ID()

View File

@@ -7,6 +7,8 @@ package app
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("KASD")
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -0,0 +1,64 @@
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blocklogger
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/util"
)
var (
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime = mstime.Now()
mtx sync.Mutex
)
// LogBlock logs a new block blue score as an information message
// to show progress to the user. In order to prevent spam, it limits logging to
// one message every 10 seconds with duration and totals included.
func LogBlock(block *util.Block) error {
mtx.Lock()
defer mtx.Unlock()
receivedLogBlocks++
receivedLogTx += int64(len(block.MsgBlock().Transactions))
now := mstime.Now()
duration := now.Sub(lastBlockLogTime)
if duration < time.Second*10 {
return nil
}
// Round the duration to the nearest 10 milliseconds.
tDuration := duration.Round(10 * time.Millisecond)
// Log information about new block blue score.
blockStr := "blocks"
if receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if receivedLogTx == 1 {
txStr = "transaction"
}
blueScore, err := block.BlueScore()
if err != nil {
return err
}
log.Infof("Processed %d %s in the last %s (%d %s, blue score %d, %s)",
receivedLogBlocks, blockStr, tDuration, receivedLogTx,
txStr, blueScore, block.MsgBlock().Header.Timestamp)
receivedLogBlocks = 0
receivedLogTx = 0
lastBlockLogTime = now
return nil
}
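// A hypothetical usage sketch (not part of the original file): a caller in a
// block-processing loop would invoke LogBlock once per accepted block, and the
// 10-second throttle above keeps the output to a periodic summary. The
// processBlocks function and its blocks argument are illustrative only.
func processBlocks(blocks []*util.Block) error {
	for _, block := range blocks {
		// ... validate and insert the block into the DAG here ...
		if err := LogBlock(block); err != nil {
			return err
		}
	}
	return nil
}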

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.PROT)

View File

@@ -1,89 +1,49 @@
package flowcontext
import (
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"sync/atomic"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// OnNewBlock updates the mempool after a new block arrival, relays
// newly unorphaned transactions, and possibly rebroadcasts
// manually added transactions when not in IBD.
func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock,
blockInsertionResult *externalapi.BlockInsertionResult) error {
hash := consensushashing.BlockHash(block)
log.Debugf("OnNewBlock start for block %s", hash)
defer log.Debugf("OnNewBlock end for block %s", hash)
unorphaningResults, err := f.UnorphanBlocks(block)
func (f *FlowContext) OnNewBlock(block *util.Block) error {
transactionsAcceptedToMempool, err := f.txPool.HandleNewBlock(block)
if err != nil {
return err
}
log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphaningResults))
newBlocks := []*externalapi.DomainBlock{block}
newBlockInsertionResults := []*externalapi.BlockInsertionResult{blockInsertionResult}
for _, unorphaningResult := range unorphaningResults {
newBlocks = append(newBlocks, unorphaningResult.block)
newBlockInsertionResults = append(newBlockInsertionResults, unorphaningResult.blockInsertionResult)
}
for i, newBlock := range newBlocks {
log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash)
_, err = f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions)
if f.onBlockAddedToDAGHandler != nil {
err := f.onBlockAddedToDAGHandler(block)
if err != nil {
return err
}
if f.onBlockAddedToDAGHandler != nil {
log.Debugf("OnNewBlock: calling f.onBlockAddedToDAGHandler for block %s", hash)
blockInsertionResult = newBlockInsertionResults[i]
err := f.onBlockAddedToDAGHandler(newBlock, blockInsertionResult)
if err != nil {
return err
}
}
}
return nil
return f.broadcastTransactionsAfterBlockAdded(block, transactionsAcceptedToMempool)
}
// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set
// resets due to pruning point change via IBD.
func (f *FlowContext) OnPruningPointUTXOSetOverride() error {
if f.onPruningPointUTXOSetOverrideHandler != nil {
return f.onPruningPointUTXOSetOverrideHandler()
}
return nil
}
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(
block *externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error {
func (f *FlowContext) broadcastTransactionsAfterBlockAdded(block *util.Block, transactionsAcceptedToMempool []*util.Tx) error {
f.updateTransactionsToRebroadcast(block)
// Don't relay transactions when in IBD.
if f.IsIBDRunning() {
if atomic.LoadUint32(&f.isInIBD) != 0 {
return nil
}
var txIDsToRebroadcast []*externalapi.DomainTransactionID
var txIDsToRebroadcast []*daghash.TxID
if f.shouldRebroadcastTransactions() {
txIDsToRebroadcast = f.txIDsToRebroadcast()
}
txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
txIDsToBroadcast := make([]*daghash.TxID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast))
for i, tx := range transactionsAcceptedToMempool {
txIDsToBroadcast[i] = consensushashing.TransactionID(tx)
txIDsToBroadcast[i] = tx.ID()
}
offset := len(transactionsAcceptedToMempool)
for i, txID := range txIDsToRebroadcast {
@@ -107,66 +67,14 @@ func (f *FlowContext) SharedRequestedBlocks() *blockrelay.SharedRequestedBlocks
}
// AddBlock adds the given block to the DAG and propagates it.
func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error {
	if len(block.Transactions) == 0 {
		return protocolerrors.Errorf(false, "cannot add header only block")
	}
	blockInsertionResult, err := f.Domain().Consensus().ValidateAndInsertBlock(block)
	if err != nil {
		if errors.As(err, &ruleerrors.RuleError{}) {
			log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err)
		}
		return err
	}
	err = f.OnNewBlock(block, blockInsertionResult)
	if err != nil {
		return err
	}
	return f.Broadcast(appmessage.NewMsgInvBlock(consensushashing.BlockHash(block)))
}
func (f *FlowContext) AddBlock(block *util.Block, flags blockdag.BehaviorFlags) error {
	_, _, err := f.DAG().ProcessBlock(block, flags)
	if err != nil {
		return err
	}
	err = f.OnNewBlock(block)
	if err != nil {
		return err
	}
	return f.Broadcast(appmessage.NewMsgInvBlock(block.Hash()))
}
// IsIBDRunning returns true if IBD is currently marked as running
func (f *FlowContext) IsIBDRunning() bool {
	f.ibdPeerMutex.RLock()
	defer f.ibdPeerMutex.RUnlock()
	return f.ibdPeer != nil
}
// TrySetIBDRunning attempts to set `isInIBD`. Returns false
// if it is already set
func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool {
	f.ibdPeerMutex.Lock()
	defer f.ibdPeerMutex.Unlock()
	if f.ibdPeer != nil {
		return false
	}
	f.ibdPeer = ibdPeer
	log.Infof("IBD started")
	return true
}
// UnsetIBDRunning unsets isInIBD
func (f *FlowContext) UnsetIBDRunning() {
	f.ibdPeerMutex.Lock()
	defer f.ibdPeerMutex.Unlock()
	if f.ibdPeer == nil {
		panic("attempted to unset isInIBD when it was not set to begin with")
	}
	f.ibdPeer = nil
	log.Infof("IBD finished")
}
// IBDPeer returns the current IBD peer or null if the node is not
// in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
	f.ibdPeerMutex.RLock()
	defer f.ibdPeerMutex.RUnlock()
	return f.ibdPeer
}
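A minimal sketch of the discipline the TrySetIBDRunning/UnsetIBDRunning markers above imply, assuming the flowcontext and peer import paths used in this diff; downloadMissingBlocks is a stand-in for the real IBD flow, which lives elsewhere.
package example
import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
)
// downloadMissingBlocks stands in for the real IBD logic so the sketch compiles.
func downloadMissingBlocks(f *flowcontext.FlowContext, peer *peerpkg.Peer) error {
	return nil
}
// runIBDIfFree claims the single IBD slot, always releases it, and never
// allows two concurrent IBD sessions.
func runIBDIfFree(f *flowcontext.FlowContext, peer *peerpkg.Peer) error {
	if !f.TrySetIBDRunning(peer) {
		// Another peer already owns the IBD session.
		return nil
	}
	defer f.UnsetIBDRunning()
	return downloadMissingBlocks(f, peer)
}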

View File

@@ -0,0 +1,8 @@
package flowcontext
import "github.com/kaspanet/kaspad/domain/blockdag"
// DAG returns the DAG associated to the flow context.
func (f *FlowContext) DAG() *blockdag.BlockDAG {
return f.dag
}

View File

@@ -1,10 +0,0 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/domain"
)
// Domain returns the Domain object associated to the flow context.
func (f *FlowContext) Domain() domain.Domain {
return f.domain
}

View File

@@ -18,11 +18,11 @@ import (
func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) {
isErrRouteClosed := errors.Is(err, router.ErrRouteClosed)
if !isErrRouteClosed {
if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
if protocolErr := &(protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) {
panic(err)
}
log.Errorf("error from %s: %s", flowName, err)
log.Errorf("error from %s: %+v", flowName, err)
}
if atomic.AddUint32(isStopping, 1) == 1 {

View File

@@ -1,31 +1,26 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/util/mstime"
"sync"
"time"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain"
"github.com/kaspanet/kaspad/app/protocol/flows/blockrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/id"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
)
// OnBlockAddedToDAGHandler is a handler function that's triggered
// when a block is added to the DAG
type OnBlockAddedToDAGHandler func(block *externalapi.DomainBlock, blockInsertionResult *externalapi.BlockInsertionResult) error
// OnPruningPointUTXOSetOverrideHandler is a handler function that's triggered whenever the UTXO set
// resets due to pruning point change via IBD.
type OnPruningPointUTXOSetOverrideHandler func() error
type OnBlockAddedToDAGHandler func(block *util.Block) error
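As an illustration only, a handler of the *util.Block form above could be registered through SetOnBlockAddedToDAGHandler (shown further down in this file); the fmt-based body is a placeholder for whatever the RPC layer actually does with the event.
package example
import (
	"fmt"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/util"
)
// wireBlockAddedNotifications registers a trivial OnBlockAddedToDAGHandler.
func wireBlockAddedNotifications(f *flowcontext.FlowContext) {
	f.SetOnBlockAddedToDAGHandler(func(block *util.Block) error {
		fmt.Printf("block %s was added to the DAG\n", block.Hash())
		return nil
	})
}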
// OnTransactionAddedToMempoolHandler is a handler function that's triggered
// when a transaction is added to the mempool
@@ -36,49 +31,45 @@ type OnTransactionAddedToMempoolHandler func()
type FlowContext struct {
cfg *config.Config
netAdapter *netadapter.NetAdapter
domain domain.Domain
txPool *mempool.TxPool
dag *blockdag.BlockDAG
addressManager *addressmanager.AddressManager
connectionManager *connmanager.ConnectionManager
timeStarted int64
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
onBlockAddedToDAGHandler OnBlockAddedToDAGHandler
onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler
transactionsToRebroadcastLock sync.Mutex
transactionsToRebroadcast map[externalapi.DomainTransactionID]*externalapi.DomainTransaction
transactionsToRebroadcast map[daghash.TxID]*util.Tx
lastRebroadcastTime time.Time
sharedRequestedTransactions *transactionrelay.SharedRequestedTransactions
sharedRequestedTransactions *relaytransactions.SharedRequestedTransactions
sharedRequestedBlocks *blockrelay.SharedRequestedBlocks
ibdPeer *peerpkg.Peer
ibdPeerMutex sync.RWMutex
isInIBD uint32
startIBDMutex sync.Mutex
ibdPeer *peerpkg.Peer
peers map[id.ID]*peerpkg.Peer
peersMutex sync.RWMutex
orphans map[externalapi.DomainHash]*externalapi.DomainBlock
orphansMutex sync.RWMutex
}
// New returns a new instance of FlowContext.
func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanager.AddressManager,
netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext {
func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addressmanager.AddressManager,
txPool *mempool.TxPool, netAdapter *netadapter.NetAdapter,
connectionManager *connmanager.ConnectionManager) *FlowContext {
return &FlowContext{
cfg: cfg,
netAdapter: netAdapter,
domain: domain,
dag: dag,
addressManager: addressManager,
connectionManager: connectionManager,
sharedRequestedTransactions: transactionrelay.NewSharedRequestedTransactions(),
txPool: txPool,
sharedRequestedTransactions: relaytransactions.NewSharedRequestedTransactions(),
sharedRequestedBlocks: blockrelay.NewSharedRequestedBlocks(),
peers: make(map[id.ID]*peerpkg.Peer),
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
timeStarted: mstime.Now().UnixMilliseconds(),
transactionsToRebroadcast: make(map[daghash.TxID]*util.Tx),
}
}
@@ -87,11 +78,6 @@ func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlo
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
}
// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler
func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) {
f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler
}
// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler
func (f *FlowContext) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler) {
f.onTransactionAddedToMempoolHandler = onTransactionAddedToMempoolHandler

View File

@@ -0,0 +1,89 @@
package flowcontext
import (
"sync/atomic"
"time"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/domain/blockdag"
)
// StartIBDIfRequired selects a peer and starts IBD against it
// if required
func (f *FlowContext) StartIBDIfRequired() {
f.startIBDMutex.Lock()
defer f.startIBDMutex.Unlock()
if f.IsInIBD() {
return
}
peer := f.selectPeerForIBD(f.dag)
if peer == nil {
spawn("StartIBDIfRequired-requestSelectedTipsIfRequired", f.requestSelectedTipsIfRequired)
return
}
atomic.StoreUint32(&f.isInIBD, 1)
f.ibdPeer = peer
spawn("StartIBDIfRequired-peer.StartIBD", peer.StartIBD)
}
// IsInIBD is true if IBD is currently running
func (f *FlowContext) IsInIBD() bool {
return atomic.LoadUint32(&f.isInIBD) != 0
}
// selectPeerForIBD returns the first peer whose selected tip
// hash is not in our DAG
func (f *FlowContext) selectPeerForIBD(dag *blockdag.BlockDAG) *peerpkg.Peer {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peerSelectedTipHash := peer.SelectedTipHash()
if !dag.IsInDAG(peerSelectedTipHash) {
return peer
}
}
return nil
}
func (f *FlowContext) requestSelectedTipsIfRequired() {
if f.isDAGTimeCurrent() {
return
}
f.requestSelectedTips()
}
func (f *FlowContext) isDAGTimeCurrent() bool {
const minDurationToRequestSelectedTips = time.Minute
return f.dag.Now().Sub(f.dag.SelectedTipHeader().Timestamp) > minDurationToRequestSelectedTips
}
func (f *FlowContext) requestSelectedTips() {
f.peersMutex.RLock()
defer f.peersMutex.RUnlock()
for _, peer := range f.peers {
peer.RequestSelectedTipIfRequired()
}
}
// FinishIBD finishes the current IBD flow and starts a new one if required.
func (f *FlowContext) FinishIBD() {
f.ibdPeer = nil
atomic.StoreUint32(&f.isInIBD, 0)
f.StartIBDIfRequired()
}
// IBDPeer returns the currently active IBD peer.
// Returns nil if we aren't currently in IBD
func (f *FlowContext) IBDPeer() *peerpkg.Peer {
if !f.IsInIBD() {
return nil
}
return f.ibdPeer
}

View File

@@ -2,6 +2,8 @@ package flowcontext
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -1,213 +0,0 @@
package flowcontext
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashset"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
)
// maxOrphans is the maximum amount of orphans allowed in the
// orphans collection. This number is an approximation of how
// many orphans there can possibly be on average. It is based
// on: 2^orphanResolutionRange * PHANTOM K.
const maxOrphans = 600
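For scale intuition only: assuming, hypothetically, orphanResolutionRange = 5 and PHANTOM K = 18 (neither value is given in this diff), the bound works out to 2^5 * 18 = 32 * 18 = 576, which the constant rounds up to 600.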
// UnorphaningResult is the result of unorphaning a block
type UnorphaningResult struct {
block *externalapi.DomainBlock
blockInsertionResult *externalapi.BlockInsertionResult
}
// AddOrphan adds the block to the orphan set
func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) {
f.orphansMutex.Lock()
defer f.orphansMutex.Unlock()
orphanHash := consensushashing.BlockHash(orphanBlock)
f.orphans[*orphanHash] = orphanBlock
if len(f.orphans) > maxOrphans {
log.Debugf("Orphan collection size exceeded. Evicting a random orphan")
f.evictRandomOrphan()
}
log.Infof("Received a block with missing parents, adding to orphan pool: %s", orphanHash)
}
func (f *FlowContext) evictRandomOrphan() {
var toEvict externalapi.DomainHash
for hash := range f.orphans {
toEvict = hash
break
}
delete(f.orphans, toEvict)
log.Debugf("Evicted %s from the orphan collection", toEvict)
}
// IsOrphan returns whether the given blockHash belongs to an orphan block
func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool {
f.orphansMutex.RLock()
defer f.orphansMutex.RUnlock()
_, ok := f.orphans[*blockHash]
return ok
}
// UnorphanBlocks removes from the orphan set all of the blocks that are no longer orphans now that rootBlock has been added, and returns the results of unorphaning them.
func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*UnorphaningResult, error) {
f.orphansMutex.Lock()
defer f.orphansMutex.Unlock()
// Find all the children of rootBlock among the orphans
// and add them to the process queue
rootBlockHash := consensushashing.BlockHash(rootBlock)
processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{})
var unorphaningResults []*UnorphaningResult
for len(processQueue) > 0 {
var orphanHash externalapi.DomainHash
orphanHash, processQueue = processQueue[0], processQueue[1:]
orphanBlock := f.orphans[orphanHash]
log.Debugf("Considering to unorphan block %s with parents %s",
orphanHash, orphanBlock.Header.ParentHashes())
canBeUnorphaned := true
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash)
if err != nil {
return nil, err
}
if !orphanBlockParentInfo.Exists || orphanBlockParentInfo.BlockStatus == externalapi.StatusHeaderOnly {
log.Debugf("Cannot unorphan block %s. It's missing at "+
"least the following parent: %s", orphanHash, orphanBlockParentHash)
canBeUnorphaned = false
break
}
}
if canBeUnorphaned {
blockInsertionResult, unorphaningSucceeded, err := f.unorphanBlock(orphanHash)
if err != nil {
return nil, err
}
if unorphaningSucceeded {
unorphaningResults = append(unorphaningResults, &UnorphaningResult{
block: orphanBlock,
blockInsertionResult: blockInsertionResult,
})
processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue)
}
}
}
return unorphaningResults, nil
}
// addChildOrphansToProcessQueue finds all child orphans of `blockHash`
// and adds them to the given `processQueue` if they don't already exist
// inside of it
// Note that this method does not modify the given `processQueue`
func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.DomainHash,
processQueue []externalapi.DomainHash) []externalapi.DomainHash {
blockChildren := f.findChildOrphansOfBlock(blockHash)
for _, blockChild := range blockChildren {
exists := false
for _, queueOrphan := range processQueue {
if queueOrphan == blockChild {
exists = true
break
}
}
if !exists {
processQueue = append(processQueue, blockChild)
}
}
return processQueue
}
func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash {
var childOrphans []externalapi.DomainHash
for orphanHash, orphanBlock := range f.orphans {
for _, orphanBlockParentHash := range orphanBlock.Header.ParentHashes() {
if orphanBlockParentHash.Equal(blockHash) {
childOrphans = append(childOrphans, orphanHash)
break
}
}
}
return childOrphans
}
func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (*externalapi.BlockInsertionResult, bool, error) {
orphanBlock, ok := f.orphans[orphanHash]
if !ok {
return nil, false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash)
}
delete(f.orphans, orphanHash)
blockInsertionResult, err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock)
if err != nil {
if errors.As(err, &ruleerrors.RuleError{}) {
log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err)
return nil, false, nil
}
return nil, false, err
}
log.Infof("Unorphaned block %s", orphanHash)
return blockInsertionResult, true, nil
}
// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan
func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "GetOrphanRoots")
defer onEnd()
f.orphansMutex.RLock()
defer f.orphansMutex.RUnlock()
_, ok := f.orphans[*orphan]
if !ok {
return nil, false, nil
}
queue := []*externalapi.DomainHash{orphan}
addedToQueueSet := hashset.New()
addedToQueueSet.Add(orphan)
roots := []*externalapi.DomainHash{}
for len(queue) > 0 {
var current *externalapi.DomainHash
current, queue = queue[0], queue[1:]
block, ok := f.orphans[*current]
if !ok {
blockInfo, err := f.domain.Consensus().GetBlockInfo(current)
if err != nil {
return nil, false, err
}
if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
roots = append(roots, current)
} else {
log.Debugf("Block %s was skipped when checking for orphan roots: "+
"exists: %t, status: %s", current, blockInfo.Exists, blockInfo.BlockStatus)
}
continue
}
for _, parent := range block.Header.ParentHashes() {
if !addedToQueueSet.Contains(parent) {
queue = append(queue, parent)
addedToQueueSet.Add(parent)
}
}
}
return roots, true, nil
}
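A sketch of how a block-relay flow might consume GetOrphanRoots, written against the removed-side API shown above; requestBlocks is a hypothetical helper that would ask the peer for the missing root blocks.
package example
import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// requestBlocks is a stand-in for sending block requests to the peer.
func requestBlocks(roots []*externalapi.DomainHash) error {
	return nil
}
// resolveOrphan asks for the missing ancestors of an orphan block, if the
// hash is indeed tracked in the orphan pool.
func resolveOrphan(f *flowcontext.FlowContext, orphanHash *externalapi.DomainHash) error {
	roots, isOrphan, err := f.GetOrphanRoots(orphanHash)
	if err != nil {
		return err
	}
	if !isOrphan {
		// Not in the orphan pool; nothing to resolve.
		return nil
	}
	return requestBlocks(roots)
}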

View File

@@ -1,43 +0,0 @@
package flowcontext
import "github.com/kaspanet/kaspad/util/mstime"
const (
maxSelectedParentTimeDiffToAllowMiningInMilliSeconds = 60 * 60 * 1000 // 1 Hour
)
// ShouldMine returns whether it's ok to use block template from this node
// for mining purposes.
func (f *FlowContext) ShouldMine() (bool, error) {
peers := f.Peers()
if len(peers) == 0 {
log.Debugf("The node is not connected, so ShouldMine returns false")
return false, nil
}
if f.IsIBDRunning() {
log.Debugf("IBD is running, so ShouldMine returns false")
return false, nil
}
virtualSelectedParent, err := f.domain.Consensus().GetVirtualSelectedParent()
if err != nil {
return false, err
}
virtualSelectedParentHeader, err := f.domain.Consensus().GetBlockHeader(virtualSelectedParent)
if err != nil {
return false, err
}
now := mstime.Now().UnixMilliseconds()
if now-virtualSelectedParentHeader.TimeInMilliseconds() < maxSelectedParentTimeDiffToAllowMiningInMilliSeconds {
log.Debugf("The selected tip timestamp is recent (%d), so ShouldMine returns true",
virtualSelectedParentHeader.TimeInMilliseconds())
return true, nil
}
log.Debugf("The selected tip timestamp is old (%d), so ShouldMine returns false",
virtualSelectedParentHeader.TimeInMilliseconds())
return false, nil
}
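As an illustration, a template-serving caller could gate on this check before handing work to a miner; getBlockTemplate below is hypothetical and only marks the shape of the call, it is not the repository's RPC handler.
package example
import (
	"errors"
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// getBlockTemplate is a stand-in for the real template builder.
func getBlockTemplate(f *flowcontext.FlowContext) (*externalapi.DomainBlock, error) {
	return &externalapi.DomainBlock{}, nil
}
// templateForMiner refuses to hand out templates while the node is not
// connected or still syncing, mirroring the ShouldMine checks above.
func templateForMiner(f *flowcontext.FlowContext) (*externalapi.DomainBlock, error) {
	ok, err := f.ShouldMine()
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.New("node is not synced; refusing to serve a block template")
	}
	return getBlockTemplate(f)
}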

View File

@@ -1,38 +1,42 @@
package flowcontext
import (
"time"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/flows/transactionrelay"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/app/protocol/flows/relaytransactions"
"github.com/kaspanet/kaspad/domain/mempool"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"time"
)
// AddTransaction adds transaction to the mempool and propagates it.
func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction) error {
func (f *FlowContext) AddTransaction(tx *util.Tx) error {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, false)
transactionsAcceptedToMempool, err := f.txPool.ProcessTransaction(tx, false)
if err != nil {
return err
}
transactionID := consensushashing.TransactionID(tx)
f.transactionsToRebroadcast[*transactionID] = tx
inv := appmessage.NewMsgInvTransaction([]*externalapi.DomainTransactionID{transactionID})
if len(transactionsAcceptedToMempool) > 1 {
return errors.New("got more than one accepted transaction when no orphans were allowed")
}
f.transactionsToRebroadcast[*tx.ID()] = tx
inv := appmessage.NewMsgInvTransaction([]*daghash.TxID{tx.ID()})
return f.Broadcast(inv)
}
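A short sketch of the submit path against the *util.Tx form of AddTransaction shown above; the surrounding RPC plumbing is assumed and not part of this diff.
package example
import (
	"github.com/kaspanet/kaspad/app/protocol/flowcontext"
	"github.com/kaspanet/kaspad/util"
)
// submitTransaction adds a locally received transaction to the mempool,
// marks it for rebroadcast and announces it to peers via an inv message,
// all through FlowContext.AddTransaction.
func submitTransaction(f *flowcontext.FlowContext, tx *util.Tx) error {
	return f.AddTransaction(tx)
}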
func (f *FlowContext) updateTransactionsToRebroadcast(block *externalapi.DomainBlock) {
func (f *FlowContext) updateTransactionsToRebroadcast(block *util.Block) {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
// Note: if the block is red, its transactions won't be rebroadcasted
// anymore, although they are not included in the UTXO set.
// This is probably ok, since red blocks are quite rare.
for _, tx := range block.Transactions {
delete(f.transactionsToRebroadcast, *consensushashing.TransactionID(tx))
for _, tx := range block.Transactions() {
delete(f.transactionsToRebroadcast, *tx.ID())
}
}
@@ -41,25 +45,30 @@ func (f *FlowContext) shouldRebroadcastTransactions() bool {
return time.Since(f.lastRebroadcastTime) > rebroadcastInterval
}
func (f *FlowContext) txIDsToRebroadcast() []*externalapi.DomainTransactionID {
func (f *FlowContext) txIDsToRebroadcast() []*daghash.TxID {
f.transactionsToRebroadcastLock.Lock()
defer f.transactionsToRebroadcastLock.Unlock()
txIDs := make([]*externalapi.DomainTransactionID, len(f.transactionsToRebroadcast))
txIDs := make([]*daghash.TxID, len(f.transactionsToRebroadcast))
i := 0
for _, tx := range f.transactionsToRebroadcast {
txIDs[i] = consensushashing.TransactionID(tx)
txIDs[i] = tx.ID()
i++
}
return txIDs
}
// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing
// SharedRequestedTransactions returns a *relaytransactions.SharedRequestedTransactions for sharing
// data about requested transactions between different peers.
func (f *FlowContext) SharedRequestedTransactions() *transactionrelay.SharedRequestedTransactions {
func (f *FlowContext) SharedRequestedTransactions() *relaytransactions.SharedRequestedTransactions {
return f.sharedRequestedTransactions
}
// TxPool returns the transaction pool associated to the manager.
func (f *FlowContext) TxPool() *mempool.TxPool {
return f.txPool
}
// OnTransactionAddedToMempool notifies the handler function that a transaction
// has been added to the mempool
func (f *FlowContext) OnTransactionAddedToMempool() {

View File

@@ -5,12 +5,14 @@ import (
"github.com/kaspanet/kaspad/app/protocol/common"
peerpkg "github.com/kaspanet/kaspad/app/protocol/peer"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)
// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
type ReceiveAddressesContext interface {
Config() *config.Config
AddressManager() *addressmanager.AddressManager
}
@@ -31,9 +33,20 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
}
msgAddresses := message.(*appmessage.MsgAddresses)
if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax {
if len(msgAddresses.AddrList) > addressmanager.GetAddressesMax {
return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
}
return context.AddressManager().AddAddresses(msgAddresses.AddressList...)
if msgAddresses.IncludeAllSubnetworks {
return protocolerrors.Errorf(true, "got unexpected "+
"IncludeAllSubnetworks=true in [%s] command", msgAddresses.Command())
}
if !msgAddresses.SubnetworkID.IsEqual(context.Config().SubnetworkID) && msgAddresses.SubnetworkID != nil {
return protocolerrors.Errorf(false, "only full nodes and %s subnetwork IDs "+
"are allowed in [%s] command, but got subnetwork ID %s",
context.Config().SubnetworkID, msgAddresses.Command(), msgAddresses.SubnetworkID)
}
context.AddressManager().AddAddresses(msgAddresses.AddrList...)
return nil
}

View File

@@ -15,20 +15,20 @@ type SendAddressesContext interface {
// SendAddresses sends addresses to a peer that requests it.
func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error {
for {
_, err := incomingRoute.Dequeue()
if err != nil {
return err
}
addresses := context.AddressManager().Addresses()
msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses))
err = outgoingRoute.Enqueue(msgAddresses)
if err != nil {
return err
}
message, err := incomingRoute.Dequeue()
if err != nil {
return err
}
msgGetAddresses := message.(*appmessage.MsgRequestAddresses)
addresses := context.AddressManager().Addresses()
msgAddresses := appmessage.NewMsgAddresses(msgGetAddresses.IncludeAllSubnetworks, msgGetAddresses.SubnetworkID)
err = msgAddresses.AddAddresses(shuffleAddresses(addresses)...)
if err != nil {
return err
}
return outgoingRoute.Enqueue(msgAddresses)
}
// shuffleAddresses randomizes the given addresses sent if there are more than the maximum allowed in one message.
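The body of shuffleAddresses is cut off by this hunk. What follows is a plausible sketch, not the actual implementation, using addressmanager.GetAddressesMax as the per-message cap; the real code may use a different constant.
package example
import (
	"math/rand"
	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/infrastructure/network/addressmanager"
)
// shuffleAddressesSketch randomizes the address list and truncates it to the
// assumed per-message cap when there are too many addresses to send at once.
func shuffleAddressesSketch(addresses []*appmessage.NetAddress) []*appmessage.NetAddress {
	if len(addresses) <= addressmanager.GetAddressesMax {
		return addresses
	}
	shuffled := make([]*appmessage.NetAddress, len(addresses))
	copy(shuffled, addresses)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	return shuffled[:addressmanager.GetAddressesMax]
}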

View File

@@ -1,29 +0,0 @@
package blockrelay
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/protocol/common"
"github.com/kaspanet/kaspad/app/protocol/protocolerrors"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func (flow *handleRelayInvsFlow) sendGetBlockLocator(lowHash *externalapi.DomainHash,
highHash *externalapi.DomainHash, limit uint32) error {
msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(lowHash, highHash, limit)
return flow.outgoingRoute.Enqueue(msgGetBlockLocator)
}
func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) {
message, err := flow.dequeueIncomingMessageAndSkipInvs(common.DefaultTimeout)
if err != nil {
return nil, err
}
msgBlockLocator, ok := message.(*appmessage.MsgBlockLocator)
if !ok {
return nil,
protocolerrors.Errorf(true, "received unexpected message type. "+
"expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command())
}
return msgBlockLocator.BlockLocatorHashes, nil
}

Some files were not shown because too many files have changed in this diff.