Compare commits

..

1 Commits

Author SHA1 Message Date
stasatdaglabs
71aca9c360 TEST COMMIT - DO NOT MERGE 2019-10-16 12:06:27 +03:00
567 changed files with 13551 additions and 18953 deletions

View File

@@ -1,7 +1,7 @@
ISC License
Copyright (c) 2018-2019 DAGLabs
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2013-2018 The btcsuite developers
Copyright (c) 2015-2016 The Decred developers
Permission to use, copy, modify, and distribute this software for any

View File

@@ -3,7 +3,7 @@ btcd
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd)
btcd is an alternative full node bitcoin implementation written in Go (golang).
@@ -41,7 +41,7 @@ which are both under active development.
#### Windows - MSI Available
https://github.com/kaspanet/kaspad/releases
https://github.com/daglabs/btcd/releases
#### Linux/BSD/MacOSX/POSIX - Build from Source
@@ -64,8 +64,8 @@ recommended that `GOPATH` is set to a directory in your home directory such as
```bash
$ # Install dep: https://golang.github.io/dep/docs/installation.html
$ git clone https://github.com/kaspanet/kaspad $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ git clone https://github.com/daglabs/btcd $GOPATH/src/github.com/daglabs/btcd
$ cd $GOPATH/src/github.com/daglabs/btcd
$ dep ensure
$ go install . ./cmd/...
```
@@ -85,7 +85,7 @@ Install a newer MSI
- Run the following commands to update btcd, all dependencies, and install it:
```bash
$ cd $GOPATH/src/github.com/kaspanet/kaspad
$ cd $GOPATH/src/github.com/daglabs/btcd
$ git pull && dep ensure
$ go install . ./cmd/...
```
@@ -114,12 +114,12 @@ $ ./btcd
## Issue Tracker
The [integrated github issue tracker](https://github.com/kaspanet/kaspad/issues)
The [integrated github issue tracker](https://github.com/daglabs/btcd/issues)
is used for this project.
## Documentation
The documentation is a work-in-progress. It is located in the [docs](https://github.com/kaspanet/kaspad/tree/master/docs) folder.
The documentation is a work-in-progress. It is located in the [docs](https://github.com/daglabs/btcd/tree/master/docs) folder.
## GPG Verification Key

View File

@@ -10,7 +10,7 @@ import (
"encoding/base32"
"encoding/binary"
"encoding/json"
"github.com/pkg/errors"
"fmt"
"io"
"math/rand"
"net"
@@ -22,10 +22,10 @@ import (
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
type newBucket [newBucketCount]map[string]*KnownAddress
@@ -565,7 +565,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
}
r, err := os.Open(filePath)
if err != nil {
return errors.Errorf("%s error opening file: %s", filePath, err)
return fmt.Errorf("%s error opening file: %s", filePath, err)
}
defer r.Close()
@@ -573,11 +573,11 @@ func (a *AddrManager) deserializePeers(filePath string) error {
dec := json.NewDecoder(r)
err = dec.Decode(&sam)
if err != nil {
return errors.Errorf("error reading %s: %s", filePath, err)
return fmt.Errorf("error reading %s: %s", filePath, err)
}
if sam.Version != serialisationVersion {
return errors.Errorf("unknown version %d in serialized "+
return fmt.Errorf("unknown version %d in serialized "+
"addrmanager", sam.Version)
}
copy(a.key[:], sam.Key[:])
@@ -586,18 +586,18 @@ func (a *AddrManager) deserializePeers(filePath string) error {
ka := new(KnownAddress)
ka.na, err = a.DeserializeNetAddress(v.Addr)
if err != nil {
return errors.Errorf("failed to deserialize netaddress "+
return fmt.Errorf("failed to deserialize netaddress "+
"%s: %s", v.Addr, err)
}
ka.srcAddr, err = a.DeserializeNetAddress(v.Src)
if err != nil {
return errors.Errorf("failed to deserialize netaddress "+
return fmt.Errorf("failed to deserialize netaddress "+
"%s: %s", v.Src, err)
}
if v.SubnetworkID != "" {
ka.subnetworkID, err = subnetworkid.NewFromStr(v.SubnetworkID)
if err != nil {
return errors.Errorf("failed to deserialize subnetwork id "+
return fmt.Errorf("failed to deserialize subnetwork id "+
"%s: %s", v.SubnetworkID, err)
}
}
@@ -616,7 +616,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range subnetworkNewBucket {
ka, ok := a.addrIndex[val]
if !ok {
return errors.Errorf("newbucket contains %s but "+
return fmt.Errorf("newbucket contains %s but "+
"none in address list", val)
}
@@ -633,7 +633,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range newBucket {
ka, ok := a.addrIndex[val]
if !ok {
return errors.Errorf("full nodes newbucket contains %s but "+
return fmt.Errorf("full nodes newbucket contains %s but "+
"none in address list", val)
}
@@ -654,7 +654,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range subnetworkTriedBucket {
ka, ok := a.addrIndex[val]
if !ok {
return errors.Errorf("Tried bucket contains %s but "+
return fmt.Errorf("Tried bucket contains %s but "+
"none in address list", val)
}
@@ -669,7 +669,7 @@ func (a *AddrManager) deserializePeers(filePath string) error {
for _, val := range triedBucket {
ka, ok := a.addrIndex[val]
if !ok {
return errors.Errorf("Full nodes tried bucket contains %s but "+
return fmt.Errorf("Full nodes tried bucket contains %s but "+
"none in address list", val)
}
@@ -682,12 +682,12 @@ func (a *AddrManager) deserializePeers(filePath string) error {
// Sanity checking.
for k, v := range a.addrIndex {
if v.refs == 0 && !v.tried {
return errors.Errorf("address %s after serialisation "+
return fmt.Errorf("address %s after serialisation "+
"with no references", k)
}
if v.refs > 0 && v.tried {
return errors.Errorf("address %s after serialisation "+
return fmt.Errorf("address %s after serialisation "+
"which is both new and tried!", k)
}
}
@@ -724,11 +724,7 @@ func (a *AddrManager) Start() {
// Start the address ticker to save addresses periodically.
a.wg.Add(1)
spawn(a.addressHandler, a.handlePanic)
}
func (a *AddrManager) handlePanic() {
atomic.AddInt32(&a.shutdown, 1)
spawn(a.addressHandler)
}
// Stop gracefully shuts down the address manager by stopping the main handler.
@@ -778,11 +774,11 @@ func (a *AddrManager) AddAddressByIP(addrIP string, subnetworkID *subnetworkid.S
// Put it in wire.Netaddress
ip := net.ParseIP(addr)
if ip == nil {
return errors.Errorf("invalid ip address %s", addr)
return fmt.Errorf("invalid ip address %s", addr)
}
port, err := strconv.ParseUint(portStr, 10, 0)
if err != nil {
return errors.Errorf("invalid port %s: %s", portStr, err)
return fmt.Errorf("invalid port %s: %s", portStr, err)
}
na := wire.NewNetAddressIPPort(ip, uint16(port), 0)
a.AddAddress(na, na, subnetworkID) // XXX use correct src address
@@ -921,7 +917,7 @@ func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.S
return nil, err
}
if len(ips) == 0 {
return nil, errors.Errorf("no addresses found for %s", host)
return nil, fmt.Errorf("no addresses found for %s", host)
}
ip = ips[0]
}
@@ -1253,7 +1249,7 @@ func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.Sub
// with the given priority.
func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error {
if !IsRoutable(na) {
return errors.Errorf("address %s is not routable", na.IP)
return fmt.Errorf("address %s is not routable", na.IP)
}
a.lamtx.Lock()

View File

@@ -5,19 +5,16 @@
package addrmgr
import (
"bou.ke/monkey"
"errors"
"fmt"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
"net"
"reflect"
"testing"
"time"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/wire"
)
// naTest is used to describe a test to be performed against the NetAddressKey
@@ -116,17 +113,7 @@ func TestStartStop(t *testing.T) {
}
func TestAddAddressByIP(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
fmtErr := errors.Errorf("")
fmtErr := fmt.Errorf("")
addrErr := &net.AddrError{}
var tests = []struct {
addrIP string
@@ -170,16 +157,6 @@ func TestAddAddressByIP(t *testing.T) {
}
func TestAddLocalAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
var tests = []struct {
address wire.NetAddress
priority AddressPriority
@@ -233,16 +210,6 @@ func TestAddLocalAddress(t *testing.T) {
}
func TestAttempt(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testattempt", lookupFunc, nil)
// Add a new address and get it
@@ -265,16 +232,6 @@ func TestAttempt(t *testing.T) {
}
func TestConnected(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testconnected", lookupFunc, nil)
// Add a new address and get it
@@ -295,16 +252,6 @@ func TestConnected(t *testing.T) {
}
func TestNeedMoreAddresses(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testneedmoreaddresses", lookupFunc, nil)
addrsToAdd := 1500
b := n.NeedMoreAddresses()
@@ -337,16 +284,6 @@ func TestNeedMoreAddresses(t *testing.T) {
}
func TestGood(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("testgood", lookupFunc, nil)
addrsToAdd := 64 * 64
addrs := make([]*wire.NetAddress, addrsToAdd)
@@ -394,16 +331,6 @@ func TestGood(t *testing.T) {
}
func TestGoodChangeSubnetworkID(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
n := New("test_good_change_subnetwork_id", lookupFunc, nil)
addr := wire.NewNetAddressIPPort(net.IPv4(173, 144, 173, 111), 8333, 0)
addrKey := NetAddressKey(addr)
@@ -473,16 +400,6 @@ func TestGoodChangeSubnetworkID(t *testing.T) {
}
func TestGetAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
localSubnetworkID := &subnetworkid.SubnetworkID{0xff}
n := New("testgetaddress", lookupFunc, localSubnetworkID)
@@ -555,16 +472,6 @@ func TestGetAddress(t *testing.T) {
}
func TestGetBestLocalAddress(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
localAddrs := []wire.NetAddress{
{IP: net.ParseIP("192.168.0.100")},
{IP: net.ParseIP("::1")},

View File

@@ -7,7 +7,7 @@ package addrmgr
import (
"time"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/wire"
)
func TstKnownAddressIsBad(ka *KnownAddress) bool {

View File

@@ -7,9 +7,9 @@ package addrmgr
import (
"time"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/wire"
)
// KnownAddress tracks information about a known network address that is used

View File

@@ -9,8 +9,8 @@ import (
"testing"
"time"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/addrmgr"
"github.com/daglabs/btcd/wire"
)
func TestChance(t *testing.T) {

View File

@@ -5,9 +5,9 @@
package addrmgr
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFuncWithPanicHandler(log)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

View File

@@ -8,9 +8,9 @@ import (
"fmt"
"net"
"github.com/kaspanet/kaspad/config"
"github.com/daglabs/btcd/config"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/wire"
)
var (
@@ -225,7 +225,7 @@ func IsValid(na *wire.NetAddress) bool {
// the public internet. This is true as long as the address is valid and is not
// in any reserved ranges.
func IsRoutable(na *wire.NetAddress) bool {
if config.ActiveConfig().NetParams().AcceptUnroutable {
if config.ActiveNetParams().AcceptUnroutable {
return !IsLocal(na)
}

View File

@@ -5,29 +5,16 @@
package addrmgr_test
import (
"bou.ke/monkey"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/dagconfig"
"net"
"testing"
"github.com/kaspanet/kaspad/addrmgr"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/addrmgr"
"github.com/daglabs/btcd/wire"
)
// TestIPTypes ensures the various functions which determine the type of an IP
// address based on RFCs work as intended.
func TestIPTypes(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
type ipTest struct {
in wire.NetAddress
rfc1918 bool
@@ -158,16 +145,6 @@ func TestIPTypes(t *testing.T) {
// TestGroupKey tests the GroupKey function to ensure it properly groups various
// IP addresses.
func TestGroupKey(t *testing.T) {
activeConfigPatch := monkey.Patch(config.ActiveConfig, func() *config.Config {
return &config.Config{
Flags: &config.Flags{
NetworkFlags: config.NetworkFlags{
ActiveNetParams: &dagconfig.SimNetParams},
},
}
})
defer activeConfigPatch.Unpatch()
tests := []struct {
name string
ip string

126
apiserver/config/config.go Normal file
View File

@@ -0,0 +1,126 @@
package config
import (
"errors"
"github.com/daglabs/btcd/apiserver/logger"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util"
"github.com/jessevdk/go-flags"
"path/filepath"
)
const (
defaultLogFilename = "apiserver.log"
defaultErrLogFilename = "apiserver_err.log"
)
var (
// activeNetParams are the currently active net params
activeNetParams dagconfig.Params
)
var (
// Default configuration options
defaultLogDir = util.AppDataDir("apiserver", false)
defaultDBAddress = "localhost:3306"
defaultHTTPListen = "0.0.0.0:8080"
)
// Config defines the configuration options for the API server.
//
// Defaults for LogDir, DBAddress and HTTPListen are applied by Parse;
// the go-flags struct tags drive CLI parsing, and the `required` tags
// make the DB credentials mandatory in every mode.
type Config struct {
	LogDir      string `long:"logdir" description:"Directory to log output."`
	RPCUser     string `short:"u" long:"rpcuser" description:"RPC username"`
	RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
	RPCServer   string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	RPCCert     string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
	DisableTLS  bool   `long:"notls" description:"Disable TLS"`
	DBAddress   string `long:"dbaddress" description:"Database address"`
	DBUser      string `long:"dbuser" description:"Database user" required:"true"`
	DBPassword  string `long:"dbpass" description:"Database password" required:"true"`
	DBName      string `long:"dbname" description:"Database name" required:"true"`
	HTTPListen  string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
	Migrate     bool   `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
	TestNet     bool   `long:"testnet" description:"Connect to testnet"`
	SimNet      bool   `long:"simnet" description:"Connect to the simulation test network"`
	DevNet      bool   `long:"devnet" description:"Connect to the development test network"`
}
// Parse parses the CLI arguments and returns a config struct.
//
// It applies defaults for LogDir, DBAddress and HTTPListen, validates
// flag combinations (RPC connection flags are mandatory unless --migrate
// is used; --cert and --notls must be consistent), resolves the active
// network, and initializes logging. Returns a non-nil error on any
// validation failure.
func Parse() (*Config, error) {
	cfg := &Config{
		LogDir:     defaultLogDir,
		DBAddress:  defaultDBAddress,
		HTTPListen: defaultHTTPListen,
	}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return nil, err
	}

	// RPC connection details are only needed when actually serving;
	// --migrate runs against the database alone.
	if !cfg.Migrate {
		if cfg.RPCUser == "" {
			return nil, errors.New("--rpcuser is required if --migrate flag is not used")
		}
		if cfg.RPCPassword == "" {
			return nil, errors.New("--rpcpass is required if --migrate flag is not used")
		}
		if cfg.RPCServer == "" {
			return nil, errors.New("--rpcserver is required if --migrate flag is not used")
		}
	}

	// TLS is on by default, so this branch fires when no certificate was
	// supplied while TLS is still enabled. Bug fix: the previous message
	// ("--notls has to be disabled if --cert is used") described the
	// opposite situation.
	if cfg.RPCCert == "" && !cfg.DisableTLS {
		return nil, errors.New("--notls has to be used if --cert is not specified")
	}
	if cfg.RPCCert != "" && cfg.DisableTLS {
		return nil, errors.New("--cert should be omitted if --notls is used")
	}

	err = resolveNetwork(cfg)
	if err != nil {
		return nil, err
	}

	logFile := filepath.Join(cfg.LogDir, defaultLogFilename)
	errLogFile := filepath.Join(cfg.LogDir, defaultErrLogFilename)
	logger.InitLog(logFile, errLogFile)

	return cfg, nil
}
// resolveNetwork validates that at most one of the mutually exclusive
// network flags is set and assigns activeNetParams accordingly
// (mainnet when no flag is given).
func resolveNetwork(cfg *Config) error {
	// Count how many network flags were selected.
	selectedNets := 0
	for _, isSet := range []bool{cfg.TestNet, cfg.SimNet, cfg.DevNet} {
		if isSet {
			selectedNets++
		}
	}
	if selectedNets > 1 {
		return errors.New("multiple net params (testnet, simnet, devnet, etc.) can't be used " +
			"together -- choose one of them")
	}

	switch {
	case cfg.TestNet:
		activeNetParams = dagconfig.TestNetParams
	case cfg.SimNet:
		activeNetParams = dagconfig.SimNetParams
	case cfg.DevNet:
		activeNetParams = dagconfig.DevNetParams
	default:
		activeNetParams = dagconfig.MainNetParams
	}
	return nil
}
// ActiveNetParams returns the currently active net params.
// The value is set once by resolveNetwork during Parse and defaults
// to mainnet.
func ActiveNetParams() *dagconfig.Params {
	return &activeNetParams
}

View File

@@ -0,0 +1,79 @@
package controllers
import (
"encoding/hex"
"fmt"
"net/http"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/apiserver/utils"
"github.com/daglabs/btcd/util/daghash"
)
const (
	// OrderAscending is parameter that can be used
	// in a get list handler to get a list ordered
	// in an ascending order.
	OrderAscending = "asc"

	// OrderDescending is parameter that can be used
	// in a get list handler to get a list ordered
	// in a descending order.
	OrderDescending = "desc"
)

// maxGetBlocksLimit caps the `limit` parameter accepted by GetBlocksHandler.
const maxGetBlocksLimit = 100
// GetBlockByHashHandler returns a block by a given hash.
//
// blockHash must be a hex string encoding exactly daghash.HashSize bytes;
// otherwise a 422 is returned. A missing block yields a 404.
func GetBlockByHashHandler(blockHash string) (interface{}, *utils.HandlerError) {
	// Validate the hash format before touching the database.
	if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
		return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
			fmt.Sprintf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, utils.NewInternalServerHandlerError(err.Error())
	}

	block := &models.Block{}
	dbResult := db.Where(&models.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
	dbErrors := dbResult.GetErrors()
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return nil, utils.NewHandlerError(http.StatusNotFound, "No block with the given block hash was found.")
	}
	if utils.HasDBError(dbErrors) {
		// Bug fix: the message previously said "loading transactions"
		// (copy-paste from the transaction handlers) and re-queried
		// dbResult.GetErrors() instead of reusing dbErrors.
		return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading block from the database:", dbErrors)
	}
	return convertBlockModelToBlockResponse(block), nil
}
// GetBlocksHandler searches for all blocks.
//
// order must be OrderAscending or OrderDescending; skip/limit implement
// paging, with limit capped at maxGetBlocksLimit.
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, *utils.HandlerError) {
	if limit > maxGetBlocksLimit {
		// Bug fix: the message previously interpolated
		// maxGetTransactionsLimit, reporting the wrong maximum (1000
		// instead of 100) for this endpoint.
		return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("The maximum allowed value for the limit is %d", maxGetBlocksLimit))
	}

	db, err := database.DB()
	if err != nil {
		return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
	}

	query := db.
		Limit(limit).
		Offset(skip).
		Preload("AcceptingBlock")
	switch order {
	case OrderAscending:
		query = query.Order("`id` ASC")
	case OrderDescending:
		query = query.Order("`id` DESC")
	default:
		return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("'%s' is not a valid order", order))
	}

	var blocks []*models.Block
	dbResult := query.Find(&blocks)
	// Robustness fix: the Find result was previously discarded unchecked,
	// silently returning an empty list on database errors.
	if utils.HasDBError(dbResult.GetErrors()) {
		return nil, utils.NewHandlerErrorFromDBErrors("Some errors were encountered when loading blocks from the database:", dbResult.GetErrors())
	}

	blockResponses := make([]*blockResponse, len(blocks))
	for i, block := range blocks {
		blockResponses[i] = convertBlockModelToBlockResponse(block)
	}
	return blockResponses, nil
}

View File

@@ -1,13 +1,11 @@
package controllers
import (
"github.com/kaspanet/kaspad/kasparov/server/apimodels"
)
import "github.com/daglabs/btcd/apiserver/utils"
// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, error) {
return &apimodels.FeeEstimateResponse{
func GetFeeEstimatesHandler() (interface{}, *utils.HandlerError) {
return &feeEstimateResponse{
HighPriority: 3,
NormalPriority: 2,
LowPriority: 1,

View File

@@ -0,0 +1,6 @@
package controllers
// RawTransaction represents a raw transaction posted to the API server
type RawTransaction struct {
RawTransaction string `json:"rawTransaction"`
}

View File

@@ -0,0 +1,113 @@
package controllers
import (
"encoding/hex"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/btcjson"
)
// transactionResponse is the JSON shape of a transaction as served by the
// API. Hex-encoded fields (Payload, scripts) are produced by the
// convert* helpers below.
type transactionResponse struct {
	TransactionHash         string                       `json:"transactionHash"`
	TransactionID           string                       `json:"transactionId"`
	AcceptingBlockHash      string                       `json:"acceptingBlockHash,omitempty"`
	AcceptingBlockBlueScore uint64                       `json:"acceptingBlockBlueScore,omitempty"`
	SubnetworkID            string                       `json:"subnetworkId"`
	LockTime                uint64                       `json:"lockTime"`
	Gas                     uint64                       `json:"gas,omitempty"`
	PayloadHash             string                       `json:"payloadHash,omitempty"`
	Payload                 string                       `json:"payload,omitempty"`
	Inputs                  []*transactionInputResponse  `json:"inputs"`
	Outputs                 []*transactionOutputResponse `json:"outputs"`
	Mass                    uint64                       `json:"mass"`
}

// transactionOutputResponse is the JSON shape of a single transaction
// output; also reused standalone for UTXO listings.
type transactionOutputResponse struct {
	TransactionID           string `json:"transactionId,omitempty"`
	Value                   uint64 `json:"value"`
	ScriptPubKey            string `json:"scriptPubKey"`
	Address                 string `json:"address,omitempty"`
	AcceptingBlockHash      string `json:"acceptingBlockHash,omitempty"`
	AcceptingBlockBlueScore uint64 `json:"acceptingBlockBlueScore,omitempty"`
}

// transactionInputResponse is the JSON shape of a single transaction input.
type transactionInputResponse struct {
	TransactionID                  string `json:"transactionId,omitempty"`
	PreviousTransactionID          string `json:"previousTransactionId"`
	PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
	SignatureScript                string `json:"signatureScript"`
	Sequence                       uint64 `json:"sequence"`
	Address                        string `json:"address"`
}

// blockResponse is the JSON shape of a block. Note: unlike the other
// response types it carries no json tags, so fields serialize under
// their Go names.
type blockResponse struct {
	BlockHash            string
	Version              int32
	HashMerkleRoot       string
	AcceptedIDMerkleRoot string
	UTXOCommitment       string
	Timestamp            uint64
	Bits                 uint32
	Nonce                uint64
	AcceptingBlockHash   *string
	BlueScore            uint64
	IsChainBlock         bool
	Mass                 uint64
}

// feeEstimateResponse is the JSON shape returned by GetFeeEstimatesHandler.
type feeEstimateResponse struct {
	HighPriority, NormalPriority, LowPriority float64
}
// convertTxModelToTxResponse maps a database transaction model (with its
// preloaded associations) onto the JSON response shape served by the API.
// Binary fields (payload, scripts) are hex-encoded.
func convertTxModelToTxResponse(tx *models.Transaction) *transactionResponse {
	outputs := make([]*transactionOutputResponse, len(tx.TransactionOutputs))
	for i, modelOut := range tx.TransactionOutputs {
		outputs[i] = &transactionOutputResponse{
			Value:        modelOut.Value,
			ScriptPubKey: hex.EncodeToString(modelOut.ScriptPubKey),
			Address:      modelOut.Address.Address,
		}
	}

	inputs := make([]*transactionInputResponse, len(tx.TransactionInputs))
	for i, modelIn := range tx.TransactionInputs {
		inputs[i] = &transactionInputResponse{
			PreviousTransactionID:          modelIn.PreviousTransactionOutput.Transaction.TransactionID,
			PreviousTransactionOutputIndex: modelIn.PreviousTransactionOutput.Index,
			SignatureScript:                hex.EncodeToString(modelIn.SignatureScript),
			Sequence:                       modelIn.Sequence,
			Address:                        modelIn.PreviousTransactionOutput.Address.Address,
		}
	}

	return &transactionResponse{
		TransactionHash:         tx.TransactionHash,
		TransactionID:           tx.TransactionID,
		AcceptingBlockHash:      tx.AcceptingBlock.BlockHash,
		AcceptingBlockBlueScore: tx.AcceptingBlock.BlueScore,
		SubnetworkID:            tx.Subnetwork.SubnetworkID,
		LockTime:                tx.LockTime,
		Gas:                     tx.Gas,
		PayloadHash:             tx.PayloadHash,
		Payload:                 hex.EncodeToString(tx.Payload),
		Inputs:                  inputs,
		Outputs:                 outputs,
		Mass:                    tx.Mass,
	}
}
// convertBlockModelToBlockResponse maps a database block model onto the
// JSON response shape served by the API. AcceptingBlockHash stays nil
// when the block has no accepting block yet.
func convertBlockModelToBlockResponse(block *models.Block) *blockResponse {
	var acceptingBlockHash *string
	if block.AcceptingBlock != nil {
		acceptingBlockHash = btcjson.String(block.AcceptingBlock.BlockHash)
	}

	return &blockResponse{
		BlockHash:            block.BlockHash,
		Version:              block.Version,
		HashMerkleRoot:       block.HashMerkleRoot,
		AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
		UTXOCommitment:       block.UTXOCommitment,
		Timestamp:            uint64(block.Timestamp.Unix()),
		Bits:                 block.Bits,
		Nonce:                block.Nonce,
		AcceptingBlockHash:   acceptingBlockHash,
		BlueScore:            block.BlueScore,
		IsChainBlock:         block.IsChainBlock,
		Mass:                 block.Mass,
	}
}

View File

@@ -0,0 +1,187 @@
package controllers
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/apiserver/utils"
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"github.com/jinzhu/gorm"
)
const maxGetTransactionsLimit = 1000
// GetTransactionByIDHandler returns a transaction by a given transaction ID.
func GetTransactionByIDHandler(txID string) (interface{}, *utils.HandlerError) {
	// Reject anything that is not a hex string of exactly TxIDSize bytes.
	if decoded, err := hex.DecodeString(txID); err != nil || len(decoded) != daghash.TxIDSize {
		return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
			fmt.Sprintf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, utils.NewInternalServerHandlerError(err.Error())
	}

	transaction := &models.Transaction{}
	findResult := addTxPreloadedFields(db.Where(&models.Transaction{TransactionID: txID})).First(&transaction)
	findErrors := findResult.GetErrors()
	if utils.IsDBRecordNotFoundError(findErrors) {
		return nil, utils.NewHandlerError(http.StatusNotFound, "No transaction with the given txid was found.")
	}
	if utils.HasDBError(findErrors) {
		return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transaction from the database:", findErrors)
	}
	return convertTxModelToTxResponse(transaction), nil
}
// GetTransactionByHashHandler returns a transaction by a given transaction hash.
//
// txHash must be a hex string encoding exactly daghash.HashSize bytes;
// otherwise a 422 is returned. A missing transaction yields a 404.
func GetTransactionByHashHandler(txHash string) (interface{}, *utils.HandlerError) {
	if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
		return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
			fmt.Sprintf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
	}

	db, err := database.DB()
	if err != nil {
		// Consistency fix: report the underlying error through the same
		// internal-server helper used by GetTransactionByIDHandler instead
		// of a bare 500 with only the generic status text.
		return nil, utils.NewInternalServerHandlerError(err.Error())
	}

	tx := &models.Transaction{}
	query := db.Where(&models.Transaction{TransactionHash: txHash})
	dbResult := addTxPreloadedFields(query).First(&tx)
	dbErrors := dbResult.GetErrors()
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return nil, utils.NewHandlerError(http.StatusNotFound, "No transaction with the given txhash was found.")
	}
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transaction from the database:", dbErrors)
	}
	return convertTxModelToTxResponse(tx), nil
}
// GetTransactionsByAddressHandler searches for all transactions
// where the given address is either an input or an output.
//
// skip/limit implement paging, with limit capped at maxGetTransactionsLimit.
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, *utils.HandlerError) {
	if limit > maxGetTransactionsLimit {
		return nil, utils.NewHandlerError(http.StatusUnprocessableEntity,
			fmt.Sprintf("The maximum allowed value for the limit is %d", maxGetTransactionsLimit))
	}
	db, err := database.DB()
	if err != nil {
		return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
	}
	txs := []*models.Transaction{}
	// Join each transaction's outputs (with their addresses) and, via its
	// inputs, the previous outputs being spent (with their addresses), so
	// that a match on either the receiving or the spending side selects
	// the transaction.
	query := db.
		Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
		Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
		Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
		Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`transaction_output_id`").
		Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`").
		Where("`out_addresses`.`address` = ?", address).
		Or("`in_addresses`.`address` = ?", address).
		Limit(limit).
		Offset(skip).
		Order("`transactions`.`id` ASC")
	dbResult := addTxPreloadedFields(query).Find(&txs)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading transactions from the database:", dbErrors)
	}
	txResponses := make([]*transactionResponse, len(txs))
	for i, tx := range txs {
		txResponses[i] = convertTxModelToTxResponse(tx)
	}
	return txResponses, nil
}
// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
func GetUTXOsByAddressHandler(address string) (interface{}, *utils.HandlerError) {
	db, err := database.DB()
	if err != nil {
		return nil, utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
	}

	// Unspent outputs for the address, with the accepting block of each
	// output's transaction preloaded for the response.
	var utxos []*models.TransactionOutput
	dbErrors := db.
		Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
		Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
		Preload("Transaction.AcceptingBlock").
		Find(&utxos).GetErrors()
	if len(dbErrors) > 0 {
		return nil, utils.NewHandlerErrorFromDBErrors("Some errors where encountered when loading UTXOs from the database:", dbErrors)
	}

	responses := make([]*transactionOutputResponse, len(utxos))
	for i, utxo := range utxos {
		responses[i] = &transactionOutputResponse{
			Value:                   utxo.Value,
			ScriptPubKey:            hex.EncodeToString(utxo.ScriptPubKey),
			AcceptingBlockHash:      utxo.Transaction.AcceptingBlock.BlockHash,
			AcceptingBlockBlueScore: utxo.Transaction.AcceptingBlock.BlueScore,
		}
	}
	return responses, nil
}
// addTxPreloadedFields attaches every association needed to build a full
// transactionResponse to the given query.
func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
	preloadedFields := []string{
		"AcceptingBlock",
		"Subnetwork",
		"TransactionOutputs",
		"TransactionOutputs.Address",
		"TransactionInputs.PreviousTransactionOutput.Transaction",
		"TransactionInputs.PreviousTransactionOutput.Address",
	}
	for _, field := range preloadedFields {
		query = query.Preload(field)
	}
	return query
}
// PostTransaction forwards a raw transaction to the JSON-RPC API server.
//
// requestBody is expected to be JSON of the form
// {"rawTransaction": "<hex-encoded wire transaction>"}. Returns nil on
// success, otherwise a HandlerError describing the failure.
func PostTransaction(requestBody []byte) *utils.HandlerError {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return utils.NewInternalServerHandlerError(err.Error())
	}
	// Parse the JSON envelope.
	rawTx := &RawTransaction{}
	err = json.Unmarshal(requestBody, rawTx)
	if err != nil {
		return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			fmt.Sprintf("Error unmarshalling request body: %s", err),
			"The request body is not json-formatted")
	}
	// Decode the hex payload into raw wire bytes.
	txBytes, err := hex.DecodeString(rawTx.RawTransaction)
	if err != nil {
		return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			fmt.Sprintf("Error decoding hex raw transaction: %s", err),
			"The raw transaction is not a hex-encoded transaction")
	}
	// Deserialize the wire-format transaction. The literal 0 is presumably
	// the protocol version argument of BtcDecode — TODO confirm.
	txReader := bytes.NewReader(txBytes)
	tx := &wire.MsgTx{}
	err = tx.BtcDecode(txReader, 0)
	if err != nil {
		return utils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			fmt.Sprintf("Error decoding raw transaction: %s", err),
			"Error decoding raw transaction")
	}
	// Second argument presumably means allowHighFees — verify against the
	// rpcclient API before relying on it.
	_, err = client.SendRawTransaction(tx, true)
	if err != nil {
		// NOTE(review): this asserts a VALUE type; confirm the client
		// returns btcjson.RPCError by value rather than *btcjson.RPCError,
		// otherwise this branch can never match.
		if rpcErr, ok := err.(btcjson.RPCError); ok && rpcErr.Code == btcjson.ErrRPCVerify {
			return utils.NewHandlerError(http.StatusInternalServerError, rpcErr.Message)
		}
		return utils.NewHandlerError(http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))
	}
	return nil
}

View File

@@ -1,19 +1,18 @@
package database
import (
nativeerrors "errors"
"errors"
"fmt"
"github.com/kaspanet/kaspad/kasparov/config"
"github.com/pkg/errors"
"os"
"github.com/daglabs/btcd/apiserver/config"
"github.com/golang-migrate/migrate/v4/source"
"github.com/jinzhu/gorm"
"github.com/golang-migrate/migrate/v4"
)
// db is the Kasparov database.
// db is the API server database.
var db *gorm.DB
// DB returns a reference to the database connection
@@ -33,7 +32,7 @@ func (l gormLogger) Print(v ...interface{}) {
// Connect connects to the database mentioned in
// config variable.
func Connect(cfg *config.KasparovFlags) error {
func Connect(cfg *config.Config) error {
connectionString := buildConnectionString(cfg)
migrator, driver, err := openMigrator(connectionString)
if err != nil {
@@ -41,10 +40,10 @@ func Connect(cfg *config.KasparovFlags) error {
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return errors.Errorf("Error checking whether the database is current: %s", err)
return fmt.Errorf("Error checking whether the database is current: %s", err)
}
if !isCurrent {
return errors.Errorf("Database is not current (version %d). Please migrate"+
return fmt.Errorf("Database is not current (version %d). Please migrate"+
" the database by running the server with --migrate flag and then run it again.", version)
}
@@ -67,7 +66,7 @@ func Close() error {
return err
}
func buildConnectionString(cfg *config.KasparovFlags) string {
func buildConnectionString(cfg *config.Config) string {
return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}
@@ -77,14 +76,14 @@ func buildConnectionString(cfg *config.KasparovFlags) string {
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
// Get the current version
version, isDirty, err := migrator.Version()
if nativeerrors.Is(err, migrate.ErrNilVersion) {
if err == migrate.ErrNilVersion {
return false, 0, nil
}
if err != nil {
return false, 0, errors.WithStack(err)
return false, 0, err
}
if isDirty {
return false, 0, errors.Errorf("Database is dirty")
return false, 0, fmt.Errorf("Database is dirty")
}
// The database is current if Next returns ErrNotExist
@@ -98,7 +97,7 @@ func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, err
}
func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
driver, err := source.Open("file://../database/migrations")
driver, err := source.Open("file://migrations")
if err != nil {
return nil, nil, err
}
@@ -111,7 +110,7 @@ func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, err
}
// Migrate database to the latest version.
func Migrate(cfg *config.KasparovFlags) error {
func Migrate(cfg *config.Config) error {
connectionString := buildConnectionString(cfg)
migrator, driver, err := openMigrator(connectionString)
if err != nil {
@@ -119,7 +118,7 @@ func Migrate(cfg *config.KasparovFlags) error {
}
isCurrent, version, err := isCurrent(migrator, driver)
if err != nil {
return errors.Errorf("Error checking whether the database is current: %s", err)
return fmt.Errorf("Error checking whether the database is current: %s", err)
}
if isCurrent {
log.Infof("Database is already up-to-date (version %d)", version)
@@ -134,7 +133,7 @@ func Migrate(cfg *config.KasparovFlags) error {
return err
}
if isDirty {
return errors.Errorf("error migrating database: database is dirty")
return fmt.Errorf("error migrating database: database is dirty")
}
log.Infof("Migrated database to the latest version (version %d)", version)
return nil

View File

@@ -0,0 +1,9 @@
package database
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("DTBS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)

View File

@@ -1,9 +1,9 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
RUN mkdir -p /go/src/github.com/daglabs/btcd
WORKDIR /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/daglabs/btcd
RUN apk add --no-cache curl git
@@ -14,7 +14,7 @@ RUN go mod download
COPY . .
RUN cd kasparov/syncd && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kasparov-syncd .
RUN cd apiserver && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o apiserver .
# --- multistage docker build: stage #2: runtime image
FROM alpine
@@ -22,7 +22,7 @@ WORKDIR /app
RUN apk add --no-cache tini
COPY --from=build /go/src/github.com/kaspanet/kaspad/kasparov/syncd/ /app/
COPY --from=build /go/src/github.com/daglabs/btcd/apiserver/ /app/
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/kasparov-syncd"]
CMD ["/app/apiserver"]

View File

@@ -1,16 +1,17 @@
package jsonrpc
import (
"github.com/kaspanet/kaspad/kasparov/config"
"github.com/pkg/errors"
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/rpcclient"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
// Client represents a connection to the JSON-RPC API of a full node
@@ -54,13 +55,13 @@ func Close() {
}
// Connect initiates a connection to the JSON-RPC API Server
func Connect(cfg *config.KasparovFlags) error {
func Connect(cfg *config.Config) error {
var cert []byte
if !cfg.DisableTLS {
var err error
cert, err = ioutil.ReadFile(cfg.RPCCert)
if err != nil {
return errors.Errorf("Error reading certificates file: %s", err)
return fmt.Errorf("Error reading certificates file: %s", err)
}
}
@@ -70,7 +71,7 @@ func Connect(cfg *config.KasparovFlags) error {
User: cfg.RPCUser,
Pass: cfg.RPCPassword,
DisableTLS: cfg.DisableTLS,
RequestTimeout: time.Second * 60,
RequestTimeout: time.Second * 5,
}
if !cfg.DisableTLS {
@@ -80,7 +81,7 @@ func Connect(cfg *config.KasparovFlags) error {
var err error
client, err = newClient(connCfg)
if err != nil {
return errors.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
return fmt.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
}
return nil
@@ -110,14 +111,14 @@ func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
var err error
client.Client, err = rpcclient.New(connCfg, notificationHandlers)
if err != nil {
return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
return nil, fmt.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
}
if err = client.NotifyBlocks(); err != nil {
return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
return nil, fmt.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
}
if err = client.NotifyChainChanges(); err != nil {
return nil, errors.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
return nil, fmt.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
}
return client, nil

11
apiserver/log.go Normal file
View File

@@ -0,0 +1,11 @@
package main
import (
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
var (
log = logger.BackendLog.Logger("APIS")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)

View File

@@ -0,0 +1,24 @@
package logger
import (
"fmt"
"github.com/daglabs/btcd/logs"
"os"
)
// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()
// InitLog attaches log file and error log file to the backend log.
// Messages at Trace level and above are written to logFile; Warn level and
// above are additionally written to errLogFile. Failure to attach either
// file is fatal and terminates the process.
func InitLog(logFile, errLogFile string) {
	err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
		os.Exit(1)
	}
	err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
		os.Exit(1)
	}
}

73
apiserver/main.go Normal file
View File

@@ -0,0 +1,73 @@
package main
import (
"fmt"
"os"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/server"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/signal"
"github.com/daglabs/btcd/util/panics"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// main is the API server entry point: it parses configuration, optionally
// runs database migrations, connects to the database and the node's JSON-RPC
// server, starts the HTTP server, and syncs with the node until interrupted.
func main() {
	defer panics.HandlePanic(log, logger.BackendLog)

	cfg, err := config.Parse()
	if err != nil {
		errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
		// Bug fix: errString was previously passed to Fprintf as the format
		// string, so a '%' inside the underlying error would garble the
		// output. Print it verbatim instead.
		_, fErr := fmt.Fprint(os.Stderr, errString)
		if fErr != nil {
			panic(errString)
		}
		return
	}

	// With --migrate, run the migration and exit without serving.
	if cfg.Migrate {
		err := database.Migrate(cfg)
		if err != nil {
			panic(fmt.Errorf("Error migrating database: %s", err))
		}
		return
	}

	err = database.Connect(cfg)
	if err != nil {
		panic(fmt.Errorf("Error connecting to database: %s", err))
	}
	defer func() {
		err := database.Close()
		if err != nil {
			panic(fmt.Errorf("Error closing the database: %s", err))
		}
	}()

	err = jsonrpc.Connect(cfg)
	if err != nil {
		panic(fmt.Errorf("Error connecting to servers: %s", err))
	}
	defer jsonrpc.Close()

	shutdownServer := server.Start(cfg.HTTPListen)
	defer shutdownServer()

	// Run the node sync loop in the background until interrupted.
	doneChan := make(chan struct{}, 1)
	spawn(func() {
		err := startSync(doneChan)
		if err != nil {
			panic(err)
		}
	})

	interrupt := signal.InterruptListener()
	<-interrupt

	// Gracefully stop syncing
	doneChan <- struct{}{}
}

View File

@@ -1,7 +1,7 @@
CREATE TABLE `raw_blocks`
(
`block_id` BIGINT UNSIGNED NOT NULL,
`block_data` MEDIUMBLOB NOT NULL,
`block_data` BLOB NOT NULL,
PRIMARY KEY (`block_id`),
CONSTRAINT `fk_raw_blocks_block_id`
FOREIGN KEY (`block_id`)

View File

@@ -1,7 +1,7 @@
CREATE TABLE `subnetworks`
(
`id` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
`subnetwork_id` CHAR(64) NOT NULL,
`subnetwork_id` CHAR(64) NOT NULL,
`gas_limit` BIGINT UNSIGNED NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)

View File

@@ -1,4 +1,4 @@
package dbmodels
package models
import (
"time"
@@ -104,7 +104,7 @@ type TransactionInput struct {
Sequence uint64
}
// Address is the gorm model for the 'addresses' table
// Address is the gorm model for the 'utxos' table
type Address struct {
ID uint64 `gorm:"primary_key"`
Address string

9
apiserver/server/log.go Normal file
View File

@@ -0,0 +1,9 @@
package server
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("REST")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)

View File

@@ -0,0 +1,50 @@
package server
import (
"fmt"
"github.com/daglabs/btcd/apiserver/utils"
"net/http"
"runtime/debug"
)
// nextRequestID is a monotonically increasing ID assigned to incoming
// requests for log correlation.
// NOTE(review): incremented without synchronization, so concurrent requests
// race on it; consider sync/atomic if strict uniqueness is required.
var nextRequestID uint64 = 1

// addRequestMetadataMiddleware tags every incoming request's context with a
// unique request ID.
func addRequestMetadataMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rCtx := utils.ToAPIServerContext(r.Context()).SetRequestID(nextRequestID)
		// Bug fix: Request.WithContext returns a shallow copy of the request;
		// the original discarded that copy, so the request ID never reached
		// downstream handlers.
		r = r.WithContext(rCtx)
		nextRequestID++
		next.ServeHTTP(w, r)
	})
}
// loggingMiddleware emits one log line for every request served.
func loggingMiddleware(next http.Handler) http.Handler {
	logAndServe := func(w http.ResponseWriter, r *http.Request) {
		requestContext := utils.ToAPIServerContext(r.Context())
		requestContext.Infof("Method: %s URI: %s", r.Method, r.RequestURI)
		next.ServeHTTP(w, r)
	}
	return http.HandlerFunc(logAndServe)
}
// recoveryMiddleware catches panics raised by downstream handlers, logs
// them with a stack trace, and reports an internal server error to the
// client instead of letting the connection drop.
func recoveryMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())
		defer func() {
			if recoveryErr := recover(); recoveryErr != nil {
				message := fmt.Sprintf("%s", recoveryErr)
				log.Criticalf("Fatal error: %s", message)
				log.Criticalf("Stack trace: %s", debug.Stack())
				sendErr(ctx, w, utils.NewInternalServerHandlerError(message))
			}
		}()
		h.ServeHTTP(w, r)
	})
}
// setJSONMiddleware marks every response as UTF-8-encoded JSON.
func setJSONMiddleware(h http.Handler) http.Handler {
	setHeaderAndServe := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(setHeaderAndServe)
}

244
apiserver/server/routes.go Normal file
View File

@@ -0,0 +1,244 @@
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"github.com/daglabs/btcd/apiserver/controllers"
"github.com/daglabs/btcd/apiserver/utils"
"github.com/gorilla/mux"
)
// Route parameter names used in URL path templates.
const (
	routeParamTxID      = "txID"
	routeParamTxHash    = "txHash"
	routeParamAddress   = "address"
	routeParamBlockHash = "blockHash"
)

// Query-string parameter names shared by the GET endpoints.
const (
	queryParamSkip  = "skip"
	queryParamLimit = "limit"
	queryParamOrder = "order"
)

// Defaults applied when the corresponding query parameter is absent.
const (
	defaultGetTransactionsLimit = 100
	defaultGetBlocksLimit       = 25
	defaultGetBlocksOrder       = controllers.OrderAscending
)
// handlerFunc is the signature shared by all route handlers: it receives the
// request context, the mux route parameters, the flattened query parameters,
// and the raw request body, and returns a JSON-serializable response or a
// handler error.
type handlerFunc func(ctx *utils.APIServerContext, routeParams map[string]string, queryParams map[string]string, requestBody []byte) (
	interface{}, *utils.HandlerError)

// makeHandler adapts a handlerFunc to the http.HandlerFunc shape, reading
// the request body, flattening query parameters, and serializing the
// response or error.
func makeHandler(handler handlerFunc) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := utils.ToAPIServerContext(r.Context())
		var requestBody []byte
		if r.Method == "POST" {
			var err error
			requestBody, err = ioutil.ReadAll(r.Body)
			if err != nil {
				// Bug fix: the original reported the error but fell through
				// and still invoked the handler with a nil body. Also use the
				// named status constant and fix the "occured" typo.
				sendErr(ctx, w, utils.NewHandlerError(http.StatusInternalServerError, "Internal server error occurred"))
				return
			}
		}
		flattenedQueryParams, hErr := flattenQueryParams(r.URL.Query())
		if hErr != nil {
			sendErr(ctx, w, hErr)
			return
		}
		response, hErr := handler(ctx, mux.Vars(r), flattenedQueryParams, requestBody)
		if hErr != nil {
			sendErr(ctx, w, hErr)
			return
		}
		// A nil response with no error means the handler has nothing to send
		// (e.g. POST /transaction on success).
		if response != nil {
			sendJSONResponse(w, response)
		}
	}
}
// flattenQueryParams converts the url.Values multimap into a plain
// map[string]string. A parameter that appears more than once results in an
// HTTP 422 error.
func flattenQueryParams(queryParams map[string][]string) (map[string]string, *utils.HandlerError) {
	flattenedMap := make(map[string]string)
	for param, values := range queryParams {
		if len(values) > 1 {
			message := fmt.Sprintf("Couldn't parse the '%s' query parameter:"+
				" expected a single value but got multiple values", param)
			return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, message)
		}
		flattenedMap[param] = values[0]
	}
	return flattenedMap, nil
}
// clientError is the JSON shape in which errors are reported to clients.
type clientError struct {
	ErrorCode    int    `json:"errorCode"`
	ErrorMessage string `json:"errorMessage"`
}

// sendErr logs a handler error and writes it to the client as a JSON
// clientError with the appropriate HTTP status code.
func sendErr(ctx *utils.APIServerContext, w http.ResponseWriter, hErr *utils.HandlerError) {
	// Bug fix: the original formatted the message with Sprintf and then
	// passed the result to Warnf as the format string, so any '%' in the
	// error text would be misinterpreted as a formatting verb.
	ctx.Warnf("got error: %s", hErr)
	w.WriteHeader(hErr.Code)
	sendJSONResponse(w, &clientError{
		ErrorCode:    hErr.Code,
		ErrorMessage: hErr.ClientMessage,
	})
}
// sendJSONResponse serializes response as JSON and writes it to w. It panics
// on failure; panics are translated into 500 responses by recoveryMiddleware.
func sendJSONResponse(w http.ResponseWriter, response interface{}) {
	b, err := json.Marshal(response)
	if err != nil {
		panic(err)
	}
	// Bug fix: the original used fmt.Fprintf(w, string(b)), which treats the
	// JSON payload as a format string — any '%' in the data would corrupt the
	// output. Write the bytes directly instead.
	_, err = w.Write(b)
	if err != nil {
		panic(err)
	}
}
// mainHandler responds to the root path with a simple liveness message.
func mainHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string, _ []byte) (interface{}, *utils.HandlerError) {
	response := struct {
		Message string `json:"message"`
	}{
		Message: "API server is running",
	}
	return response, nil
}
// addRoutes registers every API endpoint on the given router.
func addRoutes(router *mux.Router) {
	router.HandleFunc("/", makeHandler(mainHandler))

	// GET endpoints, registered in a fixed order.
	getRoutes := []struct {
		path    string
		handler handlerFunc
	}{
		{fmt.Sprintf("/transaction/id/{%s}", routeParamTxID), getTransactionByIDHandler},
		{fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash), getTransactionByHashHandler},
		{fmt.Sprintf("/transactions/address/{%s}", routeParamAddress), getTransactionsByAddressHandler},
		{fmt.Sprintf("/utxos/address/{%s}", routeParamAddress), getUTXOsByAddressHandler},
		{fmt.Sprintf("/block/{%s}", routeParamBlockHash), getBlockByHashHandler},
		{"/blocks", getBlocksHandler},
		{"/fee-estimates", getFeeEstimatesHandler},
	}
	for _, route := range getRoutes {
		router.HandleFunc(route.path, makeHandler(route.handler)).Methods("GET")
	}

	router.HandleFunc(
		"/transaction",
		makeHandler(postTransactionHandler)).
		Methods("POST")
}
// convertQueryParamToInt parses the named query parameter as an int. It
// returns defaultValue when the parameter is absent, and an HTTP 422 error
// when it is present but not a valid integer.
func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, *utils.HandlerError) {
	rawValue, ok := queryParams[param]
	if !ok {
		return defaultValue, nil
	}
	intValue, err := strconv.Atoi(rawValue)
	if err != nil {
		return 0, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("Couldn't parse the '%s' query parameter: %s", param, err))
	}
	return intValue, nil
}
// getTransactionByIDHandler returns the transaction identified by the txID
// route parameter.
func getTransactionByIDHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}

// getTransactionByHashHandler returns the transaction identified by the
// txHash route parameter.
func getTransactionByHashHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}
// getTransactionsByAddressHandler returns transactions for the address route
// parameter, paginated via the optional skip/limit query parameters.
func getTransactionsByAddressHandler(_ *utils.APIServerContext, routeParams map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	skip, hErr := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if hErr != nil {
		return nil, hErr
	}
	limit, hErr := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
	if hErr != nil {
		return nil, hErr
	}
	// Bug fix: the original re-parsed the 'limit' query parameter a second
	// time and assigned the result to skip, clobbering the skip value parsed
	// above. The redundant block has been removed.
	return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}
// getUTXOsByAddressHandler returns all UTXOs belonging to the address route
// parameter.
func getUTXOsByAddressHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}

// getBlockByHashHandler returns the block identified by the blockHash route
// parameter.
func getBlockByHashHandler(_ *utils.APIServerContext, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}

// getFeeEstimatesHandler returns the current fee estimates.
func getFeeEstimatesHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	return controllers.GetFeeEstimatesHandler()
}
// getBlocksHandler returns blocks, paginated via the optional skip/limit
// query parameters and ordered per the optional order parameter
// (ascending by default).
func getBlocksHandler(_ *utils.APIServerContext, _ map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, *utils.HandlerError) {
	skip, hErr := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if hErr != nil {
		return nil, hErr
	}
	limit, hErr := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
	if hErr != nil {
		return nil, hErr
	}
	order := defaultGetBlocksOrder
	if orderParamValue, ok := queryParams[queryParamOrder]; ok {
		if orderParamValue != controllers.OrderAscending && orderParamValue != controllers.OrderDescending {
			// Bug fix: the error message previously named the 'limit'
			// parameter, but the invalid parameter here is 'order'.
			return nil, utils.NewHandlerError(http.StatusUnprocessableEntity, fmt.Sprintf("'%s' is not a valid value for the '%s' query parameter", orderParamValue, queryParamOrder))
		}
		order = orderParamValue
	}
	return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}
// postTransactionHandler forwards the raw transaction in the request body to
// the node. On success there is no response payload.
func postTransactionHandler(_ *utils.APIServerContext, _ map[string]string, _ map[string]string,
	requestBody []byte) (interface{}, *utils.HandlerError) {
	return nil, controllers.PostTransaction(requestBody)
}

View File

@@ -2,7 +2,6 @@ package server
import (
"context"
"github.com/kaspanet/kaspad/httpserverutils"
"net/http"
"time"
@@ -16,10 +15,10 @@ const gracefulShutdownTimeout = 30 * time.Second
// function to gracefully shutdown it.
func Start(listenAddr string) func() {
router := mux.NewRouter()
router.Use(httpserverutils.AddRequestMetadataMiddleware)
router.Use(httpserverutils.RecoveryMiddleware)
router.Use(httpserverutils.LoggingMiddleware)
router.Use(httpserverutils.SetJSONMiddleware)
router.Use(addRequestMetadataMiddleware)
router.Use(recoveryMiddleware)
router.Use(loggingMiddleware)
router.Use(setJSONMiddleware)
addRoutes(router)
httpServer := &http.Server{
Addr: listenAddr,

877
apiserver/sync.go Normal file
View File

@@ -0,0 +1,877 @@
package main
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/daglabs/btcd/apiserver/config"
"github.com/daglabs/btcd/apiserver/database"
"github.com/daglabs/btcd/apiserver/jsonrpc"
"github.com/daglabs/btcd/apiserver/models"
"github.com/daglabs/btcd/apiserver/utils"
"github.com/daglabs/btcd/btcjson"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/jinzhu/gorm"
"strconv"
"time"
)
// startSync keeps the node and the API server in sync. On start, it downloads
// all data that's missing from the API server, and once it's done it keeps
// sync with the node via notifications.
func startSync(doneChan chan struct{}) error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}
	// First catch up on everything the database is missing, then switch to
	// notification-driven syncing.
	if err := fetchInitialData(client); err != nil {
		return err
	}
	sync(client, doneChan)
	return nil
}
// fetchInitialData downloads all data that's currently missing from the
// database: first the blocks themselves, then the selected parent chain.
func fetchInitialData(client *jsonrpc.Client) error {
	if err := syncBlocks(client); err != nil {
		return err
	}
	if err := syncSelectedParentChain(client); err != nil {
		return err
	}
	return nil
}
// sync keeps the API server in sync with the node via notifications
func sync(client *jsonrpc.Client, doneChan chan struct{}) {
	// ChainChangedMsgs must be processed in order and there may be times
	// when we may not be able to process them (e.g. appropriate
	// BlockAddedMsgs haven't arrived yet). As such, we pop messages from
	// client.OnChainChanged, make sure we're able to handle them, and
	// only then push them into nextChainChangedChan for them to be
	// actually handled.
	blockAddedMsgHandledChan := make(chan struct{})
	nextChainChangedChan := make(chan *jsonrpc.ChainChangedMsg)
	spawn(func() {
		for chainChanged := range client.OnChainChanged {
			for {
				// Block until the main loop below has handled one more
				// BlockAddedMsg, then re-check whether this ChainChangedMsg
				// has all the blocks it needs.
				<-blockAddedMsgHandledChan
				canHandle, err := canHandleChainChangedMsg(chainChanged)
				if err != nil {
					panic(err)
				}
				if canHandle {
					break
				}
			}
			nextChainChangedChan <- chainChanged
		}
	})
	// Handle client notifications until we're told to stop
loop:
	for {
		select {
		case blockAdded := <-client.OnBlockAdded:
			handleBlockAddedMsg(client, blockAdded)
			// Signal the goroutine above that another block was handled so
			// it may re-evaluate any pending ChainChangedMsg.
			blockAddedMsgHandledChan <- struct{}{}
		case chainChanged := <-nextChainChangedChan:
			handleChainChangedMsg(chainChanged)
		case <-doneChan:
			log.Infof("startSync stopped")
			break loop
		}
	}
}
// syncBlocks attempts to download all DAG blocks starting with
// the bluest block, and then inserts them into the database.
func syncBlocks(client *jsonrpc.Client) error {
	// Start syncing from the bluest block hash. We use blue score to
	// simulate the "last" block we have because blue-block order is
	// the order that the node uses in the various JSONRPC calls.
	startHash, err := findHashOfBluestBlock(false)
	if err != nil {
		return err
	}
	var blocks []string
	var rawBlocks []btcjson.GetBlockVerboseResult
	// Page through the node's blocks until an empty batch is returned.
	for {
		blocksResult, err := client.GetBlocks(true, false, startHash)
		if err != nil {
			return err
		}
		if len(blocksResult.Hashes) == 0 {
			break
		}
		// NOTE(review): this second call re-fetches the same page including
		// raw blocks. Being a separate RPC, blocks and rawBlocks may not be
		// pairwise aligned if the node accepted new blocks in between —
		// confirm the node guarantees stable paging from startHash.
		rawBlocksResult, err := client.GetBlocks(true, true, startHash)
		if err != nil {
			return err
		}
		startHash = &blocksResult.Hashes[len(blocksResult.Hashes)-1]
		blocks = append(blocks, blocksResult.Blocks...)
		rawBlocks = append(rawBlocks, rawBlocksResult.RawBlocks...)
	}
	return addBlocks(client, blocks, rawBlocks)
}
// syncSelectedParentChain attempts to download the selected parent
// chain starting with the bluest chain-block, and then updates the
// database accordingly.
func syncSelectedParentChain(client *jsonrpc.Client) error {
	// Blue score stands in for "the last chain-block we have" because
	// blue-block order is the order the node uses in its JSONRPC calls.
	startHash, err := findHashOfBluestBlock(true)
	if err != nil {
		return err
	}
	for {
		chainFromBlockResult, err := client.GetChainFromBlock(false, startHash)
		if err != nil {
			return err
		}
		addedBlocks := chainFromBlockResult.AddedChainBlocks
		if len(addedBlocks) == 0 {
			break
		}
		startHash = &addedBlocks[len(addedBlocks)-1].Hash
		err = updateSelectedParentChain(chainFromBlockResult.RemovedChainBlockHashes, addedBlocks)
		if err != nil {
			return err
		}
	}
	return nil
}
// findHashOfBluestBlock finds the block with the highest
// blue score in the database. If the database is empty,
// return nil.
func findHashOfBluestBlock(mustBeChainBlock bool) (*string, error) {
	dbTx, err := database.DB()
	if err != nil {
		return nil, err
	}
	query := dbTx.Order("blue_score DESC")
	if mustBeChainBlock {
		query = query.Where(&models.Block{IsChainBlock: true})
	}
	var block models.Block
	dbErrors := query.First(&block).GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find hash of bluest block: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		// No blocks in the database yet.
		return nil, nil
	}
	return &block.BlockHash, nil
}
// fetchBlock downloads the serialized block and raw block data of
// the block with hash blockHash.
func fetchBlock(client *jsonrpc.Client, blockHash *daghash.Hash) (
	block string, rawBlock *btcjson.GetBlockVerboseResult, err error) {
	msgBlock, err := client.GetBlock(blockHash, nil)
	if err != nil {
		return "", nil, err
	}
	// Serialize the block and hex-encode the result.
	buffer := bytes.NewBuffer(make([]byte, 0, msgBlock.SerializeSize()))
	if err := msgBlock.Serialize(buffer); err != nil {
		return "", nil, err
	}
	block = hex.EncodeToString(buffer.Bytes())
	rawBlock, err = client.GetBlockVerboseTx(blockHash, nil)
	if err != nil {
		return "", nil, err
	}
	return block, rawBlock, nil
}
// addBlocks inserts data in the given blocks and rawBlocks pairwise
// into the database. See addBlock for further details.
func addBlocks(client *jsonrpc.Client, blocks []string, rawBlocks []btcjson.GetBlockVerboseResult) error {
	// Robustness: the two slices are expected to be pairwise-aligned; fail
	// explicitly instead of panicking on an out-of-range index.
	if len(blocks) != len(rawBlocks) {
		return fmt.Errorf("addBlocks: got %d blocks but %d raw blocks", len(blocks), len(rawBlocks))
	}
	for i, rawBlock := range rawBlocks {
		err := addBlock(client, blocks[i], rawBlock)
		if err != nil {
			return err
		}
	}
	return nil
}
// doesBlockExist reports whether a block with the given hash is already
// stored in the database.
func doesBlockExist(dbTx *gorm.DB, blockHash string) (bool, error) {
	var dbBlock models.Block
	dbErrors := dbTx.
		Where(&models.Block{BlockHash: blockHash}).
		First(&dbBlock).
		GetErrors()
	if utils.HasDBError(dbErrors) {
		return false, utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
	}
	return !utils.IsDBRecordNotFoundError(dbErrors), nil
}
// addBlock inserts all the data that could be gleaned out of the serialized
// block and raw block data into the database. This includes transactions,
// subnetworks, and addresses. All inserts run inside a single database
// transaction, which is rolled back if any step fails.
func addBlock(client *jsonrpc.Client, block string, rawBlock btcjson.GetBlockVerboseResult) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	dbTx := db.Begin()
	err = insertBlockIntoDBTx(dbTx, client, block, rawBlock)
	if err != nil {
		// Bug fix: the original returned without rolling back, leaking the
		// open database transaction on every error path.
		dbTx.Rollback()
		return err
	}
	dbTx.Commit()
	return nil
}

// insertBlockIntoDBTx performs the actual inserts for addBlock inside the
// given database transaction. It neither commits nor rolls back.
func insertBlockIntoDBTx(dbTx *gorm.DB, client *jsonrpc.Client, block string, rawBlock btcjson.GetBlockVerboseResult) error {
	// Skip this block if it already exists.
	blockExists, err := doesBlockExist(dbTx, rawBlock.Hash)
	if err != nil {
		return err
	}
	if blockExists {
		return nil
	}
	dbBlock, err := insertBlock(dbTx, rawBlock)
	if err != nil {
		return err
	}
	err = insertBlockParents(dbTx, rawBlock, dbBlock)
	if err != nil {
		return err
	}
	err = insertBlockData(dbTx, block, dbBlock)
	if err != nil {
		return err
	}
	// Insert the block's transactions along with their subnetworks, inputs
	// and outputs.
	for i, transaction := range rawBlock.RawTx {
		dbSubnetwork, err := insertSubnetwork(dbTx, &transaction, client)
		if err != nil {
			return err
		}
		dbTransaction, err := insertTransaction(dbTx, &transaction, dbSubnetwork)
		if err != nil {
			return err
		}
		err = insertTransactionBlock(dbTx, dbBlock, dbTransaction, uint32(i))
		if err != nil {
			return err
		}
		err = insertTransactionInputs(dbTx, &transaction, dbTransaction)
		if err != nil {
			return err
		}
		err = insertTransactionOutputs(dbTx, &transaction, dbTransaction)
		if err != nil {
			return err
		}
	}
	return nil
}
// insertBlock creates a blocks-table row from the given verbose block data
// and returns the created model.
func insertBlock(dbTx *gorm.DB, rawBlock btcjson.GetBlockVerboseResult) (*models.Block, error) {
	// The node reports bits as a hex string; store it as a uint32.
	bits, err := strconv.ParseUint(rawBlock.Bits, 16, 32)
	if err != nil {
		return nil, err
	}
	dbBlock := models.Block{
		BlockHash:            rawBlock.Hash,
		Version:              rawBlock.Version,
		HashMerkleRoot:       rawBlock.HashMerkleRoot,
		AcceptedIDMerkleRoot: rawBlock.AcceptedIDMerkleRoot,
		UTXOCommitment:       rawBlock.UTXOCommitment,
		Timestamp:            time.Unix(rawBlock.Time, 0),
		Bits:                 uint32(bits),
		Nonce:                rawBlock.Nonce,
		BlueScore:            rawBlock.BlueScore,
		IsChainBlock:         false, // This must be false for updateSelectedParentChain to work properly
		Mass:                 rawBlock.Mass,
	}
	dbResult := dbTx.Create(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to insert block: ", dbErrors)
	}
	return &dbBlock, nil
}
// insertBlockParents inserts a parentBlock row for every parent of dbBlock.
// All parent blocks are expected to already exist in the database.
func insertBlockParents(dbTx *gorm.DB, rawBlock btcjson.GetBlockVerboseResult, dbBlock *models.Block) error {
	// Exit early if this is the genesis block
	if len(rawBlock.ParentHashes) == 0 {
		return nil
	}
	dbWhereBlockIDsIn := make([]*models.Block, len(rawBlock.ParentHashes))
	for i, parentHash := range rawBlock.ParentHashes {
		dbWhereBlockIDsIn[i] = &models.Block{BlockHash: parentHash}
	}
	var dbParents []models.Block
	// NOTE(review): First() normally applies LIMIT 1 even with a slice
	// destination, which would make the length check below fail for any
	// block with more than one parent — verify whether Find() was intended.
	dbResult := dbTx.
		Where(dbWhereBlockIDsIn).
		First(&dbParents)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find blocks: ", dbErrors)
	}
	// Every parent must already be present, otherwise the block cannot be
	// linked correctly.
	if len(dbParents) != len(rawBlock.ParentHashes) {
		return fmt.Errorf("some parents are missing for block: %s", rawBlock.Hash)
	}
	for _, dbParent := range dbParents {
		dbParentBlock := models.ParentBlock{
			BlockID:       dbBlock.ID,
			ParentBlockID: dbParent.ID,
		}
		dbResult := dbTx.Create(&dbParentBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert parentBlock: ", dbErrors)
		}
	}
	return nil
}
// insertBlockData stores the hex-decoded serialized block bytes in the
// raw_blocks table, keyed by the block's database ID.
func insertBlockData(dbTx *gorm.DB, block string, dbBlock *models.Block) error {
	blockData, err := hex.DecodeString(block)
	if err != nil {
		return err
	}
	dbRawBlock := models.RawBlock{
		BlockID:   dbBlock.ID,
		BlockData: blockData,
	}
	dbErrors := dbTx.Create(&dbRawBlock).GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to insert rawBlock: ", dbErrors)
	}
	return nil
}
// insertSubnetwork returns the subnetworks-table row for the transaction's
// subnetwork, fetching its details from the node and creating the row if it
// does not exist yet.
func insertSubnetwork(dbTx *gorm.DB, transaction *btcjson.TxRawResult, client *jsonrpc.Client) (*models.Subnetwork, error) {
	var dbSubnetwork models.Subnetwork
	findErrors := dbTx.
		Where(&models.Subnetwork{SubnetworkID: transaction.Subnetwork}).
		First(&dbSubnetwork).
		GetErrors()
	if utils.HasDBError(findErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find subnetwork: ", findErrors)
	}
	if !utils.IsDBRecordNotFoundError(findErrors) {
		// The subnetwork already exists in the database.
		return &dbSubnetwork, nil
	}
	// Unknown subnetwork - fetch its details from the node and insert it.
	subnetwork, err := client.GetSubnetwork(transaction.Subnetwork)
	if err != nil {
		return nil, err
	}
	dbSubnetwork = models.Subnetwork{
		SubnetworkID: transaction.Subnetwork,
		GasLimit:     subnetwork.GasLimit,
	}
	createErrors := dbTx.Create(&dbSubnetwork).GetErrors()
	if utils.HasDBError(createErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to insert subnetwork: ", createErrors)
	}
	return &dbSubnetwork, nil
}
// insertTransaction returns the database row for the given transaction,
// inserting it (with its decoded payload) if it does not exist yet.
func insertTransaction(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbSubnetwork *models.Subnetwork) (*models.Transaction, error) {
	var dbTransaction models.Transaction
	findErrors := dbTx.
		Where(&models.Transaction{TransactionID: transaction.TxID}).
		First(&dbTransaction).
		GetErrors()
	if utils.HasDBError(findErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find transaction: ", findErrors)
	}
	if utils.IsDBRecordNotFoundError(findErrors) {
		payload, err := hex.DecodeString(transaction.Payload)
		if err != nil {
			return nil, err
		}
		dbTransaction = models.Transaction{
			TransactionHash: transaction.Hash,
			TransactionID:   transaction.TxID,
			LockTime:        transaction.LockTime,
			SubnetworkID:    dbSubnetwork.ID,
			Gas:             transaction.Gas,
			Mass:            transaction.Mass,
			PayloadHash:     transaction.PayloadHash,
			Payload:         payload,
		}
		if createErrors := dbTx.Create(&dbTransaction).GetErrors(); utils.HasDBError(createErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert transaction: ", createErrors)
		}
	}
	return &dbTransaction, nil
}
// insertTransactionBlock records that dbTransaction appears in dbBlock at
// the given index, unless that relation is already recorded.
func insertTransactionBlock(dbTx *gorm.DB, dbBlock *models.Block, dbTransaction *models.Transaction, index uint32) error {
	var dbTransactionBlock models.TransactionBlock
	findErrors := dbTx.
		Where(&models.TransactionBlock{TransactionID: dbTransaction.ID, BlockID: dbBlock.ID}).
		First(&dbTransactionBlock).
		GetErrors()
	if utils.HasDBError(findErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionBlock: ", findErrors)
	}
	if utils.IsDBRecordNotFoundError(findErrors) {
		dbTransactionBlock = models.TransactionBlock{
			TransactionID: dbTransaction.ID,
			BlockID:       dbBlock.ID,
			Index:         index,
		}
		if createErrors := dbTx.Create(&dbTransactionBlock).GetErrors(); utils.HasDBError(createErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionBlock: ", createErrors)
		}
	}
	return nil
}
// insertTransactionInputs inserts all inputs of the given transaction into
// the database. Coinbase transactions are skipped entirely, since their
// inputs do not reference real previous outputs.
func insertTransactionInputs(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbTransaction *models.Transaction) error {
	isCoinbase, err := isTransactionCoinbase(transaction)
	if err != nil {
		return err
	}
	if isCoinbase {
		return nil
	}
	for i := range transaction.Vin {
		if err := insertTransactionInput(dbTx, dbTransaction, &transaction.Vin[i]); err != nil {
			return err
		}
	}
	return nil
}
// isTransactionCoinbase reports whether the given transaction belongs to
// the coinbase subnetwork.
func isTransactionCoinbase(transaction *btcjson.TxRawResult) (bool, error) {
	subnetworkID, err := subnetworkid.NewFromStr(transaction.Subnetwork)
	if err != nil {
		return false, err
	}
	return subnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase), nil
}
// insertTransactionInput inserts the given input of dbTransaction into the
// database, unless an identical input is already recorded. The previous
// transaction output that the input spends must already exist in the
// database; an error is returned otherwise.
func insertTransactionInput(dbTx *gorm.DB, dbTransaction *models.Transaction, input *btcjson.Vin) error {
	var dbPreviousTransactionOutput models.TransactionOutput
	// Column name follows gorm's convention for models.Transaction.TransactionID
	// (the original query said `transactiond_id`, which looks like a typo).
	dbResult := dbTx.
		Joins("LEFT JOIN `transactions` ON `transactions`.`id` = `transaction_outputs`.`transaction_id`").
		Where("`transactions`.`transaction_id` = ? AND `transaction_outputs`.`index` = ?", input.TxID, input.Vout).
		First(&dbPreviousTransactionOutput)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find previous transactionOutput: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return fmt.Errorf("missing output transaction output for txID: %s and index: %d", input.TxID, input.Vout)
	}

	var dbTransactionInputCount int
	dbResult = dbTx.
		Model(&models.TransactionInput{}).
		Where(&models.TransactionInput{TransactionID: dbTransaction.ID, PreviousTransactionOutputID: dbPreviousTransactionOutput.ID}).
		Count(&dbTransactionInputCount)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionInput: ", dbErrors)
	}
	if dbTransactionInputCount == 0 {
		scriptSig, err := hex.DecodeString(input.ScriptSig.Hex)
		if err != nil {
			// Propagate the decode error; the original silently
			// returned nil here and skipped the insert.
			return err
		}
		dbTransactionInput := models.TransactionInput{
			TransactionID:               dbTransaction.ID,
			PreviousTransactionOutputID: dbPreviousTransactionOutput.ID,
			Index:                       input.Vout,
			SignatureScript:             scriptSig,
			Sequence:                    input.Sequence,
		}
		dbResult := dbTx.Create(&dbTransactionInput)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to insert transactionInput: ", dbErrors)
		}
	}
	return nil
}
// insertTransactionOutputs inserts all outputs of the given transaction
// into the database, creating address rows as needed along the way.
func insertTransactionOutputs(dbTx *gorm.DB, transaction *btcjson.TxRawResult, dbTransaction *models.Transaction) error {
	for i := range transaction.Vout {
		output := &transaction.Vout[i]
		scriptPubKey, err := hex.DecodeString(output.ScriptPubKey.Hex)
		if err != nil {
			return err
		}
		dbAddress, err := insertAddress(dbTx, scriptPubKey)
		if err != nil {
			return err
		}
		if err := insertTransactionOutput(dbTx, dbTransaction, output, scriptPubKey, dbAddress); err != nil {
			return err
		}
	}
	return nil
}
// insertAddress returns the database row for the address extracted from
// the given scriptPubKey, inserting it if it does not exist yet.
func insertAddress(dbTx *gorm.DB, scriptPubKey []byte) (*models.Address, error) {
	_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, config.ActiveNetParams())
	if err != nil {
		return nil, err
	}
	encodedAddress := addr.EncodeAddress()

	var dbAddress models.Address
	findErrors := dbTx.
		Where(&models.Address{Address: encodedAddress}).
		First(&dbAddress).
		GetErrors()
	if utils.HasDBError(findErrors) {
		return nil, utils.NewErrorFromDBErrors("failed to find address: ", findErrors)
	}
	if utils.IsDBRecordNotFoundError(findErrors) {
		dbAddress = models.Address{
			Address: encodedAddress,
		}
		if createErrors := dbTx.Create(&dbAddress).GetErrors(); utils.HasDBError(createErrors) {
			return nil, utils.NewErrorFromDBErrors("failed to insert address: ", createErrors)
		}
	}
	return &dbAddress, nil
}
// insertTransactionOutput inserts the given output of dbTransaction into
// the database, unless an output with the same transaction and index is
// already recorded.
func insertTransactionOutput(dbTx *gorm.DB, dbTransaction *models.Transaction,
	output *btcjson.Vout, scriptPubKey []byte, dbAddress *models.Address) error {

	var existingCount int
	countErrors := dbTx.
		Model(&models.TransactionOutput{}).
		Where(&models.TransactionOutput{TransactionID: dbTransaction.ID, Index: output.N}).
		Count(&existingCount).
		GetErrors()
	if utils.HasDBError(countErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactionOutput: ", countErrors)
	}
	if existingCount != 0 {
		return nil
	}

	dbTransactionOutput := models.TransactionOutput{
		TransactionID: dbTransaction.ID,
		Index:         output.N,
		Value:         output.Value,
		IsSpent:       false, // This must be false for updateSelectedParentChain to work properly
		ScriptPubKey:  scriptPubKey,
		AddressID:     dbAddress.ID,
	}
	if createErrors := dbTx.Create(&dbTransactionOutput).GetErrors(); utils.HasDBError(createErrors) {
		return utils.NewErrorFromDBErrors("failed to insert transactionOutput: ", createErrors)
	}
	return nil
}
// updateSelectedParentChain updates the database to reflect the current
// selected parent chain. First it "unaccepts" all removedChainHashes and
// then it "accepts" all addedChainBlocks.
// The whole update runs inside a single database transaction, which is
// rolled back if any step fails so no partial chain state is persisted.
func updateSelectedParentChain(removedChainHashes []string, addedChainBlocks []btcjson.ChainBlock) error {
	db, err := database.DB()
	if err != nil {
		return err
	}

	dbTx := db.Begin()
	for _, removedHash := range removedChainHashes {
		if err := updateRemovedChainHashes(dbTx, removedHash); err != nil {
			// Roll back so the transaction is not left open
			// (the original leaked it on every error path).
			dbTx.Rollback()
			return err
		}
	}
	for i := range addedChainBlocks {
		if err := updateAddedChainBlocks(dbTx, &addedChainBlocks[i]); err != nil {
			dbTx.Rollback()
			return err
		}
	}

	commitErrors := dbTx.Commit().GetErrors()
	if utils.HasDBError(commitErrors) {
		return utils.NewErrorFromDBErrors("failed to commit chain update: ", commitErrors)
	}
	return nil
}
// updateRemovedChainHashes "unaccepts" the block of the given removedHash.
// That is to say, it marks it as not in the selected parent chain in the
// following ways:
// * All its TransactionInputs.PreviousTransactionOutputs are set IsSpent = false
// * All its Transactions are set AcceptingBlockID = nil
// * The block is set IsChainBlock = false
// This function will return an error if any of the above are in an unexpected state
func updateRemovedChainHashes(dbTx *gorm.DB, removedHash string) error {
	// Look up the block being removed; it must exist and must currently
	// be marked as a chain block, otherwise the caller's view of the
	// chain and the database have diverged.
	var dbBlock models.Block
	dbResult := dbTx.
		Where(&models.Block{BlockHash: removedHash}).
		First(&dbBlock)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
	}
	if utils.IsDBRecordNotFoundError(dbErrors) {
		return fmt.Errorf("missing block for hash: %s", removedHash)
	}
	if !dbBlock.IsChainBlock {
		return fmt.Errorf("block erroneously marked as not a chain block: %s", removedHash)
	}

	// Fetch every transaction this block accepted, preloading each
	// input's previous output so it can be un-spent below.
	var dbTransactions []models.Transaction
	dbResult = dbTx.
		Where(&models.Transaction{AcceptingBlockID: &dbBlock.ID}).
		Preload("TransactionInputs.PreviousTransactionOutput").
		Find(&dbTransactions)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to find transactions: ", dbErrors)
	}

	for _, dbTransaction := range dbTransactions {
		// Mark every output spent by this transaction as unspent again.
		// An already-unspent output here means the database is in an
		// inconsistent state, so bail out.
		for _, dbTransactionInput := range dbTransaction.TransactionInputs {
			dbPreviousTransactionOutput := dbTransactionInput.PreviousTransactionOutput
			if !dbPreviousTransactionOutput.IsSpent {
				return fmt.Errorf("cannot de-spend an unspent transaction output: %s index: %d",
					dbTransaction.TransactionID, dbTransactionInput.Index)
			}
			dbPreviousTransactionOutput.IsSpent = false
			dbResult = dbTx.Save(&dbPreviousTransactionOutput)
			dbErrors = dbResult.GetErrors()
			if utils.HasDBError(dbErrors) {
				return utils.NewErrorFromDBErrors("failed to update transactionOutput: ", dbErrors)
			}
		}

		// The transaction is no longer accepted by any chain block.
		dbTransaction.AcceptingBlockID = nil
		dbResult := dbTx.Save(&dbTransaction)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to update transaction: ", dbErrors)
		}
	}

	// Finally, take the block itself off the selected parent chain.
	dbBlock.IsChainBlock = false
	dbResult = dbTx.Save(&dbBlock)
	dbErrors = dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return utils.NewErrorFromDBErrors("failed to update block: ", dbErrors)
	}
	return nil
}
// updateAddedChainBlocks "accepts" the given addedBlock. That is to say,
// it marks it as in the selected parent chain in the following ways:
// * All its TransactionInputs.PreviousTransactionOutputs are set IsSpent = true
// * All its Transactions are set AcceptingBlockID = addedBlock
// * The block is set IsChainBlock = true
// This function will return an error if any of the above are in an unexpected state
func updateAddedChainBlocks(dbTx *gorm.DB, addedBlock *btcjson.ChainBlock) error {
	for _, acceptedBlock := range addedBlock.AcceptedBlocks {
		var dbAcceptedBlock models.Block
		dbResult := dbTx.
			Where(&models.Block{BlockHash: acceptedBlock.Hash}).
			First(&dbAcceptedBlock)
		dbErrors := dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to find block: ", dbErrors)
		}
		if utils.IsDBRecordNotFoundError(dbErrors) {
			return fmt.Errorf("missing block for hash: %s", acceptedBlock.Hash)
		}
		if dbAcceptedBlock.IsChainBlock {
			return fmt.Errorf("block erroneously marked as a chain block: %s", acceptedBlock.Hash)
		}

		dbWhereTransactionIDsIn := make([]*models.Transaction, len(acceptedBlock.AcceptedTxIDs))
		for i, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
			dbWhereTransactionIDsIn[i] = &models.Transaction{TransactionID: acceptedTxID}
		}
		var dbAcceptedTransactions []models.Transaction
		// Find (not First) is required here: First limits the result to
		// a single row, which would make the length check below fail
		// whenever a block accepted more than one transaction.
		dbResult = dbTx.
			Where(dbWhereTransactionIDsIn).
			Preload("TransactionInputs.PreviousTransactionOutput").
			Find(&dbAcceptedTransactions)
		dbErrors = dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to find transactions: ", dbErrors)
		}
		if len(dbAcceptedTransactions) != len(acceptedBlock.AcceptedTxIDs) {
			return fmt.Errorf("some transactions are missing for block: %s", acceptedBlock.Hash)
		}

		for _, dbAcceptedTransaction := range dbAcceptedTransactions {
			// Mark every previous output this transaction spends as
			// spent. Finding one already spent means the database is
			// inconsistent, so bail out.
			for _, dbTransactionInput := range dbAcceptedTransaction.TransactionInputs {
				dbPreviousTransactionOutput := dbTransactionInput.PreviousTransactionOutput
				if dbPreviousTransactionOutput.IsSpent {
					return fmt.Errorf("cannot spend an already spent transaction output: %s index: %d",
						dbAcceptedTransaction.TransactionID, dbTransactionInput.Index)
				}
				dbPreviousTransactionOutput.IsSpent = true
				dbResult = dbTx.Save(&dbPreviousTransactionOutput)
				dbErrors = dbResult.GetErrors()
				if utils.HasDBError(dbErrors) {
					return utils.NewErrorFromDBErrors("failed to update transactionOutput: ", dbErrors)
				}
			}

			dbAcceptedTransaction.AcceptingBlockID = &dbAcceptedBlock.ID
			dbResult = dbTx.Save(&dbAcceptedTransaction)
			dbErrors = dbResult.GetErrors()
			if utils.HasDBError(dbErrors) {
				return utils.NewErrorFromDBErrors("failed to update transaction: ", dbErrors)
			}
		}

		dbAcceptedBlock.IsChainBlock = true
		dbResult = dbTx.Save(&dbAcceptedBlock)
		dbErrors = dbResult.GetErrors()
		if utils.HasDBError(dbErrors) {
			return utils.NewErrorFromDBErrors("failed to update block: ", dbErrors)
		}
	}
	return nil
}
// handleBlockAddedMsg handles onBlockAdded messages: it fetches the newly
// added block from the node and inserts it into the database. Failures are
// logged rather than returned.
func handleBlockAddedMsg(client *jsonrpc.Client, blockAdded *jsonrpc.BlockAddedMsg) {
	blockHash := blockAdded.Header.BlockHash()

	block, rawBlock, err := fetchBlock(client, blockHash)
	if err != nil {
		log.Warnf("Could not fetch block %s: %s", blockHash, err)
		return
	}
	if err := addBlock(client, block, *rawBlock); err != nil {
		log.Warnf("Could not insert block %s: %s", blockHash, err)
		return
	}
	log.Infof("Added block %s", blockHash)
}
// canHandleChainChangedMsg checks whether we have all the necessary data
// to successfully handle a ChainChangedMsg. It does so by verifying that
// every block hash the message references already exists in the database.
func canHandleChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) (bool, error) {
	db, err := database.DB()
	if err != nil {
		return false, err
	}

	// Collect all unique referenced block hashes
	hashes := make(map[string]struct{})
	for _, removedHash := range chainChanged.RemovedChainBlockHashes {
		hashes[removedHash.String()] = struct{}{}
	}
	for _, addedBlock := range chainChanged.AddedChainBlocks {
		hashes[addedBlock.Hash.String()] = struct{}{}
		for _, acceptedBlock := range addedBlock.AcceptedBlocks {
			hashes[acceptedBlock.Hash.String()] = struct{}{}
		}
	}

	// Make sure that all the hashes exist in the database
	dbWhereBlockHashesIn := make([]*models.Block, 0, len(hashes))
	for hash := range hashes {
		dbWhereBlockHashesIn = append(dbWhereBlockHashesIn, &models.Block{BlockHash: hash})
	}
	var dbBlocksCount int
	// Model is required so gorm knows which table to count against
	// (the other Count queries in this file set it as well).
	dbResult := db.
		Model(&models.Block{}).
		Where(dbWhereBlockHashesIn).
		Count(&dbBlocksCount)
	dbErrors := dbResult.GetErrors()
	if utils.HasDBError(dbErrors) {
		return false, utils.NewErrorFromDBErrors("failed to find block count: ", dbErrors)
	}
	return len(hashes) == dbBlocksCount, nil
}
// handleChainChangedMsg handles onChainChanged messages by applying the
// selected-parent-chain changes to the database. Failures are logged
// rather than returned.
func handleChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) {
	// Convert the data in chainChanged to something we can feed into
	// updateSelectedParentChain
	removedHashes, addedBlocks := convertChainChangedMsg(chainChanged)

	err := updateSelectedParentChain(removedHashes, addedBlocks)
	if err != nil {
		log.Warnf("Could not update selected parent chain: %s", err)
		return
	}
	// The original format string used "&d" instead of "%d".
	log.Infof("Chain changed: removed %d blocks and added %d blocks",
		len(removedHashes), len(addedBlocks))
}
// convertChainChangedMsg converts the hash types inside a ChainChangedMsg
// into the string-based btcjson representations that
// updateSelectedParentChain expects.
func convertChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) (
	removedHashes []string, addedBlocks []btcjson.ChainBlock) {

	removedHashes = make([]string, len(chainChanged.RemovedChainBlockHashes))
	for i, removedHash := range chainChanged.RemovedChainBlockHashes {
		removedHashes[i] = removedHash.String()
	}

	addedBlocks = make([]btcjson.ChainBlock, 0, len(chainChanged.AddedChainBlocks))
	for _, addedBlock := range chainChanged.AddedChainBlocks {
		acceptedBlocks := make([]btcjson.AcceptedBlock, 0, len(addedBlock.AcceptedBlocks))
		for _, acceptedBlock := range addedBlock.AcceptedBlocks {
			acceptedTxIDs := make([]string, 0, len(acceptedBlock.AcceptedTxIDs))
			for _, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
				acceptedTxIDs = append(acceptedTxIDs, acceptedTxID.String())
			}
			acceptedBlocks = append(acceptedBlocks, btcjson.AcceptedBlock{
				Hash:          acceptedBlock.Hash.String(),
				AcceptedTxIDs: acceptedTxIDs,
			})
		}
		addedBlocks = append(addedBlocks, btcjson.ChainBlock{
			Hash:           addedBlock.Hash.String(),
			AcceptedBlocks: acceptedBlocks,
		})
	}
	return removedHashes, addedBlocks
}

View File

@@ -1,4 +1,4 @@
package httpserverutils
package utils
import (
"context"
@@ -11,69 +11,69 @@ const (
contextKeyRequestID contextKey = "REQUEST_ID"
)
// ServerContext is a context.Context wrapper that
// APIServerContext is a context.Context wrapper that
// enables custom logs with request ID.
type ServerContext struct {
type APIServerContext struct {
context.Context
}
// ToServerContext takes a context.Context instance
// and converts it to *ServerContext.
func ToServerContext(ctx context.Context) *ServerContext {
if asCtx, ok := ctx.(*ServerContext); ok {
// ToAPIServerContext takes a context.Context instance
// and converts it to *APIServerContext.
func ToAPIServerContext(ctx context.Context) *APIServerContext {
if asCtx, ok := ctx.(*APIServerContext); ok {
return asCtx
}
return &ServerContext{Context: ctx}
return &APIServerContext{Context: ctx}
}
// SetRequestID associates a request ID for the context.
func (ctx *ServerContext) SetRequestID(requestID uint64) context.Context {
func (ctx *APIServerContext) SetRequestID(requestID uint64) context.Context {
context.WithValue(ctx, contextKeyRequestID, requestID)
return ctx
}
func (ctx *ServerContext) requestID() uint64 {
func (ctx *APIServerContext) requestID() uint64 {
id := ctx.Value(contextKeyRequestID)
uint64ID, _ := id.(uint64)
return uint64ID
}
func (ctx *ServerContext) getLogString(format string, params ...interface{}) string {
func (ctx *APIServerContext) getLogString(format string, params ...interface{}) string {
return fmt.Sprintf("RID %d: ", ctx.requestID()) + fmt.Sprintf(format, params...)
}
// Tracef writes a customized formatted context
// related log with log level 'Trace'.
func (ctx *ServerContext) Tracef(format string, params ...interface{}) {
func (ctx *APIServerContext) Tracef(format string, params ...interface{}) {
log.Trace(ctx.getLogString(format, params...))
}
// Debugf writes a customized formatted context
// related log with log level 'Debug'.
func (ctx *ServerContext) Debugf(format string, params ...interface{}) {
func (ctx *APIServerContext) Debugf(format string, params ...interface{}) {
log.Debug(ctx.getLogString(format, params...))
}
// Infof writes a customized formatted context
// related log with log level 'Info'.
func (ctx *ServerContext) Infof(format string, params ...interface{}) {
func (ctx *APIServerContext) Infof(format string, params ...interface{}) {
log.Info(ctx.getLogString(format, params...))
}
// Warnf writes a customized formatted context
// related log with log level 'Warn'.
func (ctx *ServerContext) Warnf(format string, params ...interface{}) {
func (ctx *APIServerContext) Warnf(format string, params ...interface{}) {
log.Warn(ctx.getLogString(format, params...))
}
// Errorf writes a customized formatted context
// related log with log level 'Error'.
func (ctx *ServerContext) Errorf(format string, params ...interface{}) {
func (ctx *APIServerContext) Errorf(format string, params ...interface{}) {
log.Error(ctx.getLogString(format, params...))
}
// Criticalf writes a customized formatted context
// related log with log level 'Critical'.
func (ctx *ServerContext) Criticalf(format string, params ...interface{}) {
func (ctx *APIServerContext) Criticalf(format string, params ...interface{}) {
log.Criticalf(ctx.getLogString(format, params...))
}

View File

@@ -1,10 +1,8 @@
package httpserverutils
package utils
import (
"encoding/json"
"fmt"
"github.com/jinzhu/gorm"
"github.com/pkg/errors"
"net/http"
"strings"
)
@@ -13,29 +11,29 @@ import (
// a rest route handler or a middleware.
type HandlerError struct {
Code int
Cause error
Message string
ClientMessage string
}
func (hErr *HandlerError) Error() string {
return hErr.Cause.Error()
return hErr.Message
}
// NewHandlerError returns a HandlerError with the given code and message.
func NewHandlerError(code int, err error) error {
func NewHandlerError(code int, message string) *HandlerError {
return &HandlerError{
Code: code,
Cause: err,
ClientMessage: err.Error(),
Message: message,
ClientMessage: message,
}
}
// NewHandlerErrorWithCustomClientMessage returns a HandlerError with
// the given code, message and client error message.
func NewHandlerErrorWithCustomClientMessage(code int, err error, clientMessage string) error {
func NewHandlerErrorWithCustomClientMessage(code int, message, clientMessage string) *HandlerError {
return &HandlerError{
Code: code,
Cause: err,
Message: message,
ClientMessage: clientMessage,
}
}
@@ -43,8 +41,8 @@ func NewHandlerErrorWithCustomClientMessage(code int, err error, clientMessage s
// NewInternalServerHandlerError returns a HandlerError with
// the given message, and the http.StatusInternalServerError
// status text as client message.
func NewInternalServerHandlerError(err error) error {
return NewHandlerErrorWithCustomClientMessage(http.StatusInternalServerError, err, http.StatusText(http.StatusInternalServerError))
func NewInternalServerHandlerError(message string) *HandlerError {
return NewHandlerErrorWithCustomClientMessage(http.StatusInternalServerError, message, http.StatusText(http.StatusInternalServerError))
}
// NewErrorFromDBErrors takes a slice of database errors and a prefix, and
@@ -55,7 +53,14 @@ func NewErrorFromDBErrors(prefix string, dbErrors []error) error {
for i, dbErr := range dbErrors {
dbErrorsStrings[i] = fmt.Sprintf("\"%s\"", dbErr)
}
return errors.Errorf("%s [%s]", prefix, strings.Join(dbErrorsStrings, ","))
return fmt.Errorf("%s [%s]", prefix, strings.Join(dbErrorsStrings, ","))
}
// NewHandlerErrorFromDBErrors takes a slice of database errors and a prefix, and
// returns an HandlerError with error code http.StatusInternalServerError with
// all of the database errors formatted to one string with the given prefix
func NewHandlerErrorFromDBErrors(prefix string, dbErrors []error) *HandlerError {
return NewInternalServerHandlerError(NewErrorFromDBErrors(prefix, dbErrors).Error())
}
// IsDBRecordNotFoundError returns true if the given dbErrors contains only a RecordNotFound error
@@ -67,43 +72,3 @@ func IsDBRecordNotFoundError(dbErrors []error) bool {
func HasDBError(dbErrors []error) bool {
return !IsDBRecordNotFoundError(dbErrors) && len(dbErrors) > 0
}
// ClientError is the http response that is sent to the
// client in case of an error.
type ClientError struct {
ErrorCode int `json:"errorCode"`
ErrorMessage string `json:"errorMessage"`
}
func (err *ClientError) Error() string {
return fmt.Sprintf("%s (Code: %d)", err.ErrorMessage, err.ErrorCode)
}
// SendErr takes a HandlerError and create a ClientError out of it that is sent
// to the http client.
func SendErr(ctx *ServerContext, w http.ResponseWriter, err error) {
var hErr *HandlerError
var ok bool
if hErr, ok = err.(*HandlerError); !ok {
hErr = NewInternalServerHandlerError(err).(*HandlerError)
}
ctx.Warnf("got error: %s", err)
w.WriteHeader(hErr.Code)
SendJSONResponse(w, &ClientError{
ErrorCode: hErr.Code,
ErrorMessage: hErr.ClientMessage,
})
}
// SendJSONResponse encodes the given response to JSON format and
// sends it to the client
func SendJSONResponse(w http.ResponseWriter, response interface{}) {
b, err := json.Marshal(response)
if err != nil {
panic(err)
}
_, err = fmt.Fprintf(w, string(b))
if err != nil {
panic(err)
}
}

9
apiserver/utils/log.go Normal file
View File

@@ -0,0 +1,9 @@
package utils
import "github.com/daglabs/btcd/util/panics"
import "github.com/daglabs/btcd/apiserver/logger"
var (
log = logger.BackendLog.Logger("UTIL")
spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
)

View File

@@ -3,7 +3,7 @@ blockchain
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd/blockchain)
Package blockchain implements bitcoin block handling and chain selection rules.
The test coverage is currently only around 60%, but will be increasing over
@@ -21,7 +21,7 @@ block chain.
## Installation and Updating
```bash
$ go get -u github.com/kaspanet/kaspad/blockchain
$ go get -u github.com/daglabs/btcd/blockchain
```
## Bitcoin Chain Processing Overview
@@ -61,18 +61,18 @@ is by no means exhaustive:
## Examples
* [ProcessBlock Example](http://godoc.org/github.com/kaspanet/kaspad/blockchain#example-BlockChain-ProcessBlock)
* [ProcessBlock Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BlockChain-ProcessBlock)
Demonstrates how to create a new chain instance and use ProcessBlock to
attempt to add a block to the chain. This example intentionally
attempts to insert a duplicate genesis block to illustrate how an invalid
block is handled.
* [CompactToBig Example](http://godoc.org/github.com/kaspanet/kaspad/blockchain#example-CompactToBig)
* [CompactToBig Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-CompactToBig)
Demonstrates how to convert the compact "bits" in a block header which
represent the target difficulty to a big integer and display it using the
typical hex notation.
* [BigToCompact Example](http://godoc.org/github.com/kaspanet/kaspad/blockchain#example-BigToCompact)
* [BigToCompact Example](http://godoc.org/github.com/daglabs/btcd/blockchain#example-BigToCompact)
Demonstrates how to convert a target difficulty into the
compact "bits" in a block header which represent that target difficulty.

View File

@@ -6,8 +6,8 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
)
func (dag *BlockDAG) addNodeToIndexWithInvalidAncestor(block *util.Block) error {
@@ -97,12 +97,10 @@ func (dag *BlockDAG) maybeAcceptBlock(block *util.Block, flags BehaviorFlags) er
Block: block,
WasUnorphaned: flags&BFWasUnorphaned != 0,
})
if len(chainUpdates.addedChainBlockHashes) > 0 {
dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
})
}
dag.sendNotification(NTChainChanged, &ChainChangedNotificationData{
RemovedChainBlockHashes: chainUpdates.removedChainBlockHashes,
AddedChainBlockHashes: chainUpdates.addedChainBlockHashes,
})
dag.dagLock.Lock()
return nil

View File

@@ -1,15 +1,15 @@
package blockdag
import (
"github.com/pkg/errors"
"errors"
"path/filepath"
"strings"
"testing"
"bou.ke/monkey"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
)
func TestMaybeAcceptBlockErrors(t *testing.T) {

View File

@@ -3,7 +3,7 @@ package blockdag
import (
"container/heap"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// baseHeap is an implementation for heap.Interface that sorts blocks by their height

View File

@@ -3,8 +3,8 @@ package blockdag
import (
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
)
// TestBlockHeap tests pushing, popping, and determining the length of the heap.

View File

@@ -1,9 +1,9 @@
package blockdag
import (
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"fmt"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
)
var (
@@ -57,7 +57,7 @@ func DBFetchBlockIDByHash(dbTx database.Tx, hash *daghash.Hash) (uint64, error)
hashIndex := dbTx.Metadata().Bucket(idByHashIndexBucketName)
serializedID := hashIndex.Get(hash[:])
if serializedID == nil {
return 0, errors.Errorf("no entry in the block ID index for block with hash %s", hash)
return 0, fmt.Errorf("no entry in the block ID index for block with hash %s", hash)
}
return DeserializeBlockID(serializedID), nil
@@ -69,7 +69,7 @@ func DBFetchBlockHashBySerializedID(dbTx database.Tx, serializedID []byte) (*dag
idIndex := dbTx.Metadata().Bucket(hashByIDIndexBucketName)
hashBytes := idIndex.Get(serializedID)
if hashBytes == nil {
return nil, errors.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
return nil, fmt.Errorf("no entry in the block ID index for block with id %d", byteOrder.Uint64(serializedID))
}
var hash daghash.Hash

View File

@@ -7,9 +7,9 @@ package blockdag
import (
"sync"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
)
// blockIndex provides facilities for keeping track of an in-memory index of the

View File

@@ -1,14 +1,14 @@
package blockdag
import (
"github.com/pkg/errors"
"errors"
"strings"
"testing"
"time"
"bou.ke/monkey"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
)
func TestAncestorErrors(t *testing.T) {

View File

@@ -1,8 +1,8 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// BlockLocator is used to help locate a specific block. The algorithm for

View File

@@ -8,8 +8,8 @@ import (
"fmt"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
// blockStatus is a bit field representing the validation state of the block.
@@ -78,6 +78,9 @@ type blockNode struct {
// hash is the double sha 256 of the block.
hash *daghash.Hash
// height is the position in the block DAG.
height uint64
// chainHeight is the number of hops you need to go down the selected parent chain in order to get to the genesis block.
chainHeight uint64
@@ -129,10 +132,18 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parents block
if len(parents) > 0 {
node.blues, node.selectedParent, node.blueScore = phantom(node, phantomK)
node.height = calculateNodeHeight(node)
node.chainHeight = calculateChainHeight(node)
}
}
func calculateNodeHeight(node *blockNode) uint64 {
if node.isGenesis() {
return 0
}
return node.parents.maxHeight() + 1
}
func calculateChainHeight(node *blockNode) uint64 {
if node.isGenesis() {
return 0
@@ -221,8 +232,8 @@ func (node *blockNode) isGenesis() bool {
return len(node.parents) == 0
}
func (node *blockNode) finalityScore(dag *BlockDAG) uint64 {
return node.blueScore / uint64(dag.dagParams.FinalityInterval)
func (node *blockNode) finalityScore() uint64 {
return node.blueScore / FinalityInterval
}
// String returns a string that contains the block hash.

View File

@@ -3,7 +3,7 @@ package blockdag
import (
"strings"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// blockSet implements a basic unsorted set of blocks
@@ -23,6 +23,17 @@ func setFromSlice(blocks ...*blockNode) blockSet {
return set
}
// maxHeight returns the height of the highest block in the block set
func (bs blockSet) maxHeight() uint64 {
var maxHeight uint64
for _, node := range bs {
if maxHeight < node.height {
maxHeight = node.height
}
}
return maxHeight
}
// add adds a block to this BlockSet
func (bs blockSet) add(block *blockNode) {
bs[*block.hash] = block

View File

@@ -4,7 +4,7 @@ import (
"reflect"
"testing"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util/daghash"
)
func TestHashes(t *testing.T) {

View File

@@ -1,8 +1,8 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"errors"
"github.com/daglabs/btcd/util"
"math"
"math/big"
"sort"

View File

@@ -1,9 +1,9 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"fmt"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"reflect"
"testing"
"time"
@@ -132,7 +132,7 @@ func checkWindowIDs(window []*blockNode, expectedIDs []string, idByBlockMap map[
ids[i] = idByBlockMap[node]
}
if !reflect.DeepEqual(ids, expectedIDs) {
return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
return fmt.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids)
}
return nil
}

View File

@@ -6,11 +6,10 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/pkg/errors"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// CheckpointConfirmations is the number of blocks before the end of the current
@@ -219,7 +218,7 @@ func (dag *BlockDAG) IsCheckpointCandidate(block *util.Block) (bool, error) {
// in the DAG match. This should always be the case unless the
// caller provided an invalid block.
if node.chainHeight != block.ChainHeight() {
return false, errors.Errorf("passed block chain height of %d does not "+
return false, fmt.Errorf("passed block chain height of %d does not "+
"match the its height in the DAG: %d", block.ChainHeight(),
node.chainHeight)
}

View File

@@ -4,16 +4,17 @@ import (
"bufio"
"bytes"
"encoding/binary"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"errors"
"fmt"
"github.com/daglabs/btcd/util/subnetworkid"
"io"
"math"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/txsort"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/txsort"
"github.com/daglabs/btcd/wire"
)
// compactFeeData is a specialized data type to store a compact list of fees
@@ -84,7 +85,7 @@ func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactF
for _, blueBlock := range node.blues {
feeData, err := dbFetchFeeData(dbTx, blueBlock.hash)
if err != nil {
return errors.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
return fmt.Errorf("Error getting fee data for block %s: %s", blueBlock.hash, err)
}
bluesFeeData[*blueBlock.hash] = feeData
@@ -102,7 +103,7 @@ func (node *blockNode) getBluesFeeData(dag *BlockDAG) (map[daghash.Hash]compactF
func dbStoreFeeData(dbTx database.Tx, blockHash *daghash.Hash, feeData compactFeeData) error {
feeBucket, err := dbTx.Metadata().CreateBucketIfNotExists(feeBucket)
if err != nil {
return errors.Errorf("Error creating or retrieving fee bucket: %s", err)
return fmt.Errorf("Error creating or retrieving fee bucket: %s", err)
}
return feeBucket.Put(blockHash.CloneBytes(), feeData)
@@ -116,7 +117,7 @@ func dbFetchFeeData(dbTx database.Tx, blockHash *daghash.Hash) (compactFeeData,
feeData := feeBucket.Get(blockHash.CloneBytes())
if feeData == nil {
return nil, errors.Errorf("No fee data found for block %s", blockHash)
return nil, fmt.Errorf("No fee data found for block %s", blockHash)
}
return feeData, nil
@@ -220,19 +221,19 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
txsAcceptanceData MultiBlockTxsAcceptanceData, feeData map[daghash.Hash]compactFeeData) (
*wire.TxIn, *wire.TxOut, error) {
blockTxsAcceptanceData, ok := txsAcceptanceData.FindAcceptanceData(blueBlock.hash)
blockTxsAcceptanceData, ok := txsAcceptanceData[*blueBlock.hash]
if !ok {
return nil, nil, errors.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
return nil, nil, fmt.Errorf("No txsAcceptanceData for block %s", blueBlock.hash)
}
blockFeeData, ok := feeData[*blueBlock.hash]
if !ok {
return nil, nil, errors.Errorf("No feeData for block %s", blueBlock.hash)
return nil, nil, fmt.Errorf("No feeData for block %s", blueBlock.hash)
}
if len(blockTxsAcceptanceData.TxAcceptanceData) != blockFeeData.Len() {
return nil, nil, errors.Errorf(
if len(blockTxsAcceptanceData) != blockFeeData.Len() {
return nil, nil, fmt.Errorf(
"length of accepted transaction data(%d) and fee data(%d) is not equal for block %s",
len(blockTxsAcceptanceData.TxAcceptanceData), blockFeeData.Len(), blueBlock.hash)
len(blockTxsAcceptanceData), blockFeeData.Len(), blueBlock.hash)
}
txIn := &wire.TxIn{
@@ -247,24 +248,24 @@ func coinbaseInputAndOutputForBlueBlock(dag *BlockDAG, blueBlock *blockNode,
totalFees := uint64(0)
feeIterator := blockFeeData.iterator()
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData {
fee, err := feeIterator.next()
if err != nil {
return nil, nil, errors.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
return nil, nil, fmt.Errorf("Error retrieving fee from compactFeeData iterator: %s", err)
}
if txAcceptanceData.IsAccepted {
totalFees += fee
}
}
totalReward := CalcBlockSubsidy(blueBlock.blueScore, dag.dagParams) + totalFees
totalReward := CalcBlockSubsidy(blueBlock.height, dag.dagParams) + totalFees
if totalReward == 0 {
return txIn, nil, nil
}
// the ScriptPubKey for the coinbase is parsed from the coinbase payload
scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData.TxAcceptanceData[0].Tx.MsgTx())
scriptPubKey, _, err := DeserializeCoinbasePayload(blockTxsAcceptanceData[0].Tx.MsgTx())
if err != nil {
return nil, nil, err
}

View File

@@ -7,7 +7,7 @@ package blockdag
import (
"compress/bzip2"
"encoding/binary"
"github.com/pkg/errors"
"fmt"
"io"
"os"
"path/filepath"
@@ -16,11 +16,11 @@ import (
"testing"
"time"
"github.com/kaspanet/kaspad/dagconfig"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/dagconfig"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
func loadBlocksWithLog(t *testing.T, filename string) ([]*util.Block, error) {
@@ -185,7 +185,7 @@ func checkRuleError(gotErr, wantErr error) error {
// Ensure the error code is of the expected type and the error
// code matches the value specified in the test instance.
if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) {
return errors.Errorf("wrong error - got %T (%[1]v), want %T",
return fmt.Errorf("wrong error - got %T (%[1]v), want %T",
gotErr, wantErr)
}
if gotErr == nil {
@@ -195,7 +195,7 @@ func checkRuleError(gotErr, wantErr error) error {
// Ensure the want error type is a script error.
werr, ok := wantErr.(RuleError)
if !ok {
return errors.Errorf("unexpected test error type %T", wantErr)
return fmt.Errorf("unexpected test error type %T", wantErr)
}
// Ensure the error codes match. It's safe to use a raw type assert
@@ -203,7 +203,7 @@ func checkRuleError(gotErr, wantErr error) error {
// the want error is a script error.
gotErrorCode := gotErr.(RuleError).ErrorCode
if gotErrorCode != werr.ErrorCode {
return errors.Errorf("mismatched error code - got %v (%v), want %v",
return fmt.Errorf("mismatched error code - got %v (%v), want %v",
gotErrorCode, gotErr, werr.ErrorCode)
}

View File

@@ -5,8 +5,8 @@
package blockdag
import (
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/txscript"
"github.com/daglabs/btcd/btcec"
"github.com/daglabs/btcd/txscript"
)
// -----------------------------------------------------------------------------

View File

@@ -5,27 +5,30 @@
package blockdag
import (
"errors"
"fmt"
"github.com/pkg/errors"
"math"
"sort"
"sync"
"time"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
// maxOrphanBlocks is the maximum number of orphan blocks that can be
// queued.
maxOrphanBlocks = 100
// FinalityInterval is the interval that determines the finality window of the DAG.
FinalityInterval = 100
)
// orphanBlock represents a block that we don't yet have the parent for. It
@@ -192,22 +195,10 @@ func (dag *BlockDAG) IsKnownOrphan(hash *daghash.Hash) bool {
return exists
}
// IsKnownInvalid returns whether the passed hash is known to be an invalid block.
// Note that if the block is not found this method will return false.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) IsKnownInvalid(hash *daghash.Hash) bool {
node := dag.index.LookupNode(hash)
if node == nil {
return false
}
return dag.index.NodeStatus(node).KnownInvalid()
}
// GetOrphanMissingAncestorHashes returns all of the missing parents in the orphan's sub-DAG
//
// This function is safe for concurrent access.
func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) ([]*daghash.Hash, error) {
func (dag *BlockDAG) GetOrphanMissingAncestorHashes(hash *daghash.Hash) ([]*daghash.Hash, error) {
// Protect concurrent access. Using a read lock only so multiple
// readers can query without blocking each other.
dag.orphanLock.RLock()
@@ -216,7 +207,7 @@ func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) ([
missingAncestorsHashes := make([]*daghash.Hash, 0)
visited := make(map[daghash.Hash]bool)
queue := []*daghash.Hash{orphanHash}
queue := []*daghash.Hash{hash}
for len(queue) > 0 {
var current *daghash.Hash
current, queue = queue[0], queue[1:]
@@ -228,7 +219,7 @@ func (dag *BlockDAG) GetOrphanMissingAncestorHashes(orphanHash *daghash.Hash) ([
queue = append(queue, parentHash)
}
} else {
if !dag.BlockExists(current) && current != orphanHash {
if !dag.BlockExists(current) {
missingAncestorsHashes = append(missingAncestorsHashes, current)
}
}
@@ -505,10 +496,10 @@ func (dag *BlockDAG) addBlock(node *blockNode, parentNodes blockSet,
return chainUpdates, nil
}
func calculateAcceptedIDMerkleRoot(multiBlockTxsAcceptanceData MultiBlockTxsAcceptanceData) *daghash.Hash {
func calculateAcceptedIDMerkleRoot(txsAcceptanceData MultiBlockTxsAcceptanceData) *daghash.Hash {
var acceptedTxs []*util.Tx
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
for _, txAcceptance := range blockTxsAcceptanceData.TxAcceptanceData {
for _, blockTxsAcceptanceData := range txsAcceptanceData {
for _, txAcceptance := range blockTxsAcceptanceData {
if !txAcceptance.IsAccepted {
continue
}
@@ -592,7 +583,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
panic(err)
}
err = dag.saveChangesFromBlock(block, virtualUTXODiff, txsAcceptanceData, virtualTxsAcceptanceData, newBlockFeeData)
err = dag.saveChangesFromBlock(node, block, virtualUTXODiff, txsAcceptanceData, virtualTxsAcceptanceData, newBlockFeeData)
if err != nil {
return nil, err
}
@@ -600,7 +591,7 @@ func (dag *BlockDAG) connectBlock(node *blockNode,
return chainUpdates, nil
}
func (dag *BlockDAG) saveChangesFromBlock(block *util.Block, virtualUTXODiff *UTXODiff,
func (dag *BlockDAG) saveChangesFromBlock(node *blockNode, block *util.Block, virtualUTXODiff *UTXODiff,
txsAcceptanceData MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData MultiBlockTxsAcceptanceData,
feeData compactFeeData) error {
@@ -689,7 +680,7 @@ func (dag *BlockDAG) validateGasLimit(block *util.Block) error {
currentGasUsage = 0
currentSubnetworkGasLimit, err = dag.SubnetworkStore.GasLimit(currentSubnetworkID)
if err != nil {
return errors.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err)
return fmt.Errorf("Error getting gas limit for subnetworkID '%s': %s", currentSubnetworkID, err)
}
}
@@ -745,14 +736,14 @@ func (dag *BlockDAG) updateFinalityPoint() {
}
// We are looking for a new finality point only if the new block's finality score is higher
// by 2 than the existing finality point's
if selectedTip.finalityScore(dag) < dag.lastFinalityPoint.finalityScore(dag)+2 {
if selectedTip.finalityScore() < dag.lastFinalityPoint.finalityScore()+2 {
return
}
var currentNode *blockNode
for currentNode = selectedTip.selectedParent; ; currentNode = currentNode.selectedParent {
// We look for the first node in the selected parent chain that has a higher finality score than the last finality point.
if currentNode.selectedParent.finalityScore(dag) == dag.lastFinalityPoint.finalityScore(dag) {
if currentNode.selectedParent.finalityScore() == dag.lastFinalityPoint.finalityScore() {
break
}
}
@@ -769,7 +760,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) {
}
var blockHashesToDelete []*daghash.Hash
if deleteDiffData {
blockHashesToDelete = make([]*daghash.Hash, 0, dag.dagParams.FinalityInterval)
blockHashesToDelete = make([]*daghash.Hash, 0, FinalityInterval)
}
for len(queue) > 0 {
var current *blockNode
@@ -860,7 +851,7 @@ func (dag *BlockDAG) TxsAcceptedByVirtual() (MultiBlockTxsAcceptanceData, error)
func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlockTxsAcceptanceData, error) {
node := dag.index.LookupNode(blockHash)
if node == nil {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
return nil, fmt.Errorf("Couldn't find block %s", blockHash)
}
_, txsAcceptanceData, err := dag.pastUTXO(node)
return txsAcceptanceData, err
@@ -872,7 +863,7 @@ func (dag *BlockDAG) TxsAcceptedByBlockHash(blockHash *daghash.Hash) (MultiBlock
func (dag *BlockDAG) BlockPastUTXO(blockHash *daghash.Hash) (UTXOSet, error) {
node := dag.index.LookupNode(blockHash)
if node == nil {
return nil, errors.Errorf("Couldn't find block %s", blockHash)
return nil, fmt.Errorf("Couldn't find block %s", blockHash)
}
pastUTXO, _, err := dag.pastUTXO(node)
return pastUTXO, err
@@ -894,7 +885,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, block *util.Block, newBloc
chainUpdates *chainUpdates, err error) {
if err = node.updateParents(dag, newBlockUTXO); err != nil {
return nil, nil, nil, errors.Errorf("failed updating parents of %s: %s", node, err)
return nil, nil, nil, fmt.Errorf("failed updating parents of %s: %s", node, err)
}
// Update the virtual block's parents (the DAG tips) to include the new block.
@@ -903,7 +894,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, block *util.Block, newBloc
// Build a UTXO set for the new virtual block
newVirtualPastUTXO, virtualTxsAcceptanceData, err := dag.pastUTXO(&dag.virtual.blockNode)
if err != nil {
return nil, nil, nil, errors.Errorf("could not restore past UTXO for virtual %s: %s", dag.virtual, err)
return nil, nil, nil, fmt.Errorf("could not restore past UTXO for virtual %s: %s", dag.virtual, err)
}
// Apply the new virtual's blue score to all the unaccepted UTXOs
@@ -919,7 +910,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, block *util.Block, newBloc
// Apply new utxoDiffs to all the tips
err = updateTipsUTXO(dag, newVirtualUTXO)
if err != nil {
return nil, nil, nil, errors.Errorf("failed updating the tips' UTXO: %s", err)
return nil, nil, nil, fmt.Errorf("failed updating the tips' UTXO: %s", err)
}
// It is now safe to meld the UTXO set to base.
@@ -927,7 +918,7 @@ func (dag *BlockDAG) applyDAGChanges(node *blockNode, block *util.Block, newBloc
virtualUTXODiff = diffSet.UTXODiff
err = dag.meldVirtualUTXO(diffSet)
if err != nil {
return nil, nil, nil, errors.Errorf("failed melding the virtual UTXO: %s", err)
return nil, nil, nil, fmt.Errorf("failed melding the virtual UTXO: %s", err)
}
dag.index.SetStatusFlags(node, statusValid)
@@ -963,11 +954,11 @@ func (node *blockNode) diffFromTxs(pastUTXO UTXOSet, transactions []*util.Tx) (*
// diffFromAccpetanceData creates a diff that "updates" the blue scores of the given
// UTXOSet with the node's blueScore according to the given acceptance data.
func (node *blockNode) diffFromAcceptanceData(pastUTXO UTXOSet, multiBlockTxsAcceptanceData MultiBlockTxsAcceptanceData) (*UTXODiff, error) {
func (node *blockNode) diffFromAcceptanceData(pastUTXO UTXOSet, blockTxsAcceptanceDatas MultiBlockTxsAcceptanceData) (*UTXODiff, error) {
diff := NewUTXODiff()
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
for _, blockTxsAcceptanceData := range blockTxsAcceptanceDatas {
for _, txAcceptanceData := range blockTxsAcceptanceData {
if txAcceptanceData.IsAccepted {
acceptanceDiff, err := pastUTXO.diffFromAcceptedTx(txAcceptanceData.Tx.MsgTx(), node.blueScore)
if err != nil {
@@ -1041,26 +1032,13 @@ type TxAcceptanceData struct {
IsAccepted bool
}
// BlockTxsAcceptanceData stores all transactions in a block with an indication
// BlockTxsAcceptanceData stores all transactions in a block with an indication
// if they were accepted or not by some other block
type BlockTxsAcceptanceData struct {
BlockHash daghash.Hash
TxAcceptanceData []TxAcceptanceData
}
type BlockTxsAcceptanceData []TxAcceptanceData
// MultiBlockTxsAcceptanceData stores data about which transactions were accepted by a block
// It's a slice of the block's blues block IDs and their transaction acceptance data
type MultiBlockTxsAcceptanceData []BlockTxsAcceptanceData
// FindAcceptanceData finds the BlockTxsAcceptanceData that matches blockHash
func (data MultiBlockTxsAcceptanceData) FindAcceptanceData(blockHash *daghash.Hash) (*BlockTxsAcceptanceData, bool) {
for _, acceptanceData := range data {
if acceptanceData.BlockHash.IsEqual(blockHash) {
return &acceptanceData, true
}
}
return nil, false
}
// It's a map from the block's blues block IDs to the transaction acceptance data
type MultiBlockTxsAcceptanceData map[daghash.Hash]BlockTxsAcceptanceData
func genesisPastUTXO(virtual *virtualBlock) UTXOSet {
// The genesis has no past UTXO, so we create an empty UTXO
@@ -1101,21 +1079,14 @@ func (node *blockNode) fetchBlueBlocks(db database.DB) ([]*util.Block, error) {
// Purposefully ignoring failures - these are just unaccepted transactions
// Writing down which transactions were accepted or not in txsAcceptanceData
func (node *blockNode) applyBlueBlocks(selectedParentUTXO UTXOSet, blueBlocks []*util.Block) (
pastUTXO UTXOSet, multiBlockTxsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO UTXOSet, txsAcceptanceData MultiBlockTxsAcceptanceData, err error) {
pastUTXO = selectedParentUTXO
multiBlockTxsAcceptanceData = MultiBlockTxsAcceptanceData{}
txsAcceptanceData = MultiBlockTxsAcceptanceData{}
// Add blueBlocks to multiBlockTxsAcceptanceData bottom-to-top instead of
// top-to-bottom. This is so that anyone who iterates over it would process
// blocks (and transactions) in their order of appearance in the DAG.
for i := len(blueBlocks) - 1; i >= 0; i-- {
blueBlock := blueBlocks[i]
for _, blueBlock := range blueBlocks {
transactions := blueBlock.Transactions()
blockTxsAcceptanceData := BlockTxsAcceptanceData{
BlockHash: *blueBlock.Hash(),
TxAcceptanceData: make([]TxAcceptanceData, len(transactions)),
}
blockTxsAcceptanceData := make(BlockTxsAcceptanceData, len(transactions))
isSelectedParent := blueBlock.Hash().IsEqual(node.selectedParent.hash)
for i, tx := range blueBlock.Transactions() {
var isAccepted bool
@@ -1127,12 +1098,12 @@ func (node *blockNode) applyBlueBlocks(selectedParentUTXO UTXOSet, blueBlocks []
return nil, nil, err
}
}
blockTxsAcceptanceData.TxAcceptanceData[i] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
blockTxsAcceptanceData[i] = TxAcceptanceData{Tx: tx, IsAccepted: isAccepted}
}
multiBlockTxsAcceptanceData = append(multiBlockTxsAcceptanceData, blockTxsAcceptanceData)
txsAcceptanceData[*blueBlock.Hash()] = blockTxsAcceptanceData
}
return pastUTXO, multiBlockTxsAcceptanceData, nil
return pastUTXO, txsAcceptanceData, nil
}
// updateParents adds this block to the children sets of its parents
@@ -1354,7 +1325,7 @@ func (dag *BlockDAG) GetUTXOEntry(outpoint wire.Outpoint) (*UTXOEntry, bool) {
func (dag *BlockDAG) BlueScoreByBlockHash(hash *daghash.Hash) (uint64, error) {
node := dag.index.LookupNode(hash)
if node == nil {
return 0, errors.Errorf("block %s is unknown", hash)
return 0, fmt.Errorf("block %s is unknown", hash)
}
return node.blueScore, nil
@@ -1381,7 +1352,7 @@ func (dag *BlockDAG) BlockConfirmationsByHashNoLock(hash *daghash.Hash) (uint64,
node := dag.index.LookupNode(hash)
if node == nil {
return 0, errors.Errorf("block %s is unknown", hash)
return 0, fmt.Errorf("block %s is unknown", hash)
}
return dag.blockConfirmations(node)
@@ -1394,13 +1365,9 @@ func (dag *BlockDAG) UTXOCommitment() string {
// blockConfirmations returns the current confirmations number of the given node
// The confirmations number is defined as follows:
// * If the node is in the selected tip red set -> 0
// * If the node is the selected tip -> 1
// * Otherwise -> selectedTip.blueScore - acceptingBlock.blueScore + 2
// * If the node is red -> 0
// * Otherwise -> virtual.blueScore - acceptingBlock.blueScore + 1
func (dag *BlockDAG) blockConfirmations(node *blockNode) (uint64, error) {
if node == dag.selectedTip() {
return 1, nil
}
acceptingBlock, err := dag.acceptingBlock(node)
if err != nil {
return 0, err
@@ -1411,41 +1378,47 @@ func (dag *BlockDAG) blockConfirmations(node *blockNode) (uint64, error) {
return 0, nil
}
return dag.selectedTip().blueScore - acceptingBlock.blueScore + 2, nil
return dag.virtual.blueScore - acceptingBlock.blueScore + 1, nil
}
// acceptingBlock finds the node in the selected-parent chain that had accepted
// the given node
func (dag *BlockDAG) acceptingBlock(node *blockNode) (*blockNode, error) {
// Explicitly handle the DAG tips
if dag.virtual.tips().contains(node) {
// Return the virtual block if the node is one of the DAG blues
for _, tip := range dag.virtual.blues {
if tip == node {
return &dag.virtual.blockNode, nil
}
}
// Otherwise, this tip is red and doesn't have an accepting block
return nil, nil
}
// Return an error if the node is the virtual block
if node == &dag.virtual.blockNode {
return nil, errors.New("cannot get acceptingBlock for virtual")
if len(node.children) == 0 {
if node == &dag.virtual.blockNode {
return nil, errors.New("cannot get acceptingBlock for virtual")
}
// A childless block that isn't a tip or the virtual can never happen. Panicking
panic(fmt.Errorf("got childless block %s that is neither a tip nor the virtual", node.hash))
}
// If the node is a chain-block itself, the accepting block is its chain-child
if dag.IsInSelectedParentChain(node.hash) {
if len(node.children) == 0 {
// If the node is the selected tip, it doesn't have an accepting block
return nil, nil
}
for _, child := range node.children {
if dag.IsInSelectedParentChain(child.hash) {
return child, nil
}
}
return nil, errors.Errorf("chain block %s does not have a chain child", node.hash)
return nil, fmt.Errorf("chain block %s does not have a chain child", node.hash)
}
// Find the only chain block that may contain the node in its blues
candidateAcceptingBlock := dag.oldestChainBlockWithBlueScoreGreaterThan(node.blueScore)
// if no candidate is found, it means that the node has same or more
// blue score than the selected tip and is found in its anticone, so
// it doesn't have an accepting block
if candidateAcceptingBlock == nil {
return nil, nil
}
// candidateAcceptingBlock is the accepting block only if it actually contains
// the node in its blues
for _, blue := range candidateAcceptingBlock.blues {
@@ -1454,8 +1427,7 @@ func (dag *BlockDAG) acceptingBlock(node *blockNode) (*blockNode, error) {
}
}
// Otherwise, the node is red or in the selected tip anticone, and
// doesn't have an accepting block
// Otherwise, the node is red and doesn't have an accepting block
return nil, nil
}
@@ -1492,7 +1464,7 @@ func (dag *BlockDAG) SelectedParentChain(startHash *daghash.Hash) ([]*daghash.Ha
startHash = dag.genesis.hash
}
if !dag.BlockExists(startHash) {
return nil, nil, errors.Errorf("startHash %s does not exist in the DAG", startHash)
return nil, nil, fmt.Errorf("startHash %s does not exist in the DAG", startHash)
}
// If startHash is not in the selected parent chain, go down its selected parent chain
@@ -1562,7 +1534,7 @@ func (dag *BlockDAG) CurrentBits() uint32 {
func (dag *BlockDAG) HeaderByHash(hash *daghash.Hash) (*wire.BlockHeader, error) {
node := dag.index.LookupNode(hash)
if node == nil {
err := errors.Errorf("block %s is not known", hash)
err := fmt.Errorf("block %s is not known", hash)
return &wire.BlockHeader{}, err
}
@@ -1598,39 +1570,39 @@ func (dag *BlockDAG) ChildHashesByHash(hash *daghash.Hash) ([]*daghash.Hash, err
return node.children.hashes(), nil
}
// ChainHeightToHashRange returns a range of block hashes for the given start chain
// height and end hash, inclusive on both ends. The hashes are for all blocks that
// are ancestors of endHash with height greater than or equal to startChainHeight.
// The end hash must belong to a block that is known to be valid.
// HeightToHashRange returns a range of block hashes for the given start height
// and end hash, inclusive on both ends. The hashes are for all blocks that are
// ancestors of endHash with height greater than or equal to startHeight. The
// end hash must belong to a block that is known to be valid.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) ChainHeightToHashRange(startChainHeight uint64,
func (dag *BlockDAG) HeightToHashRange(startHeight uint64,
endHash *daghash.Hash, maxResults int) ([]*daghash.Hash, error) {
endNode := dag.index.LookupNode(endHash)
if endNode == nil {
return nil, errors.Errorf("no known block header with hash %s", endHash)
return nil, fmt.Errorf("no known block header with hash %s", endHash)
}
if !dag.index.NodeStatus(endNode).KnownValid() {
return nil, errors.Errorf("block %s is not yet validated", endHash)
return nil, fmt.Errorf("block %s is not yet validated", endHash)
}
endChainHeight := endNode.chainHeight
endHeight := endNode.height
if startChainHeight < 0 {
return nil, errors.Errorf("start chain height (%d) is below 0", startChainHeight)
if startHeight < 0 {
return nil, fmt.Errorf("start height (%d) is below 0", startHeight)
}
if startChainHeight > endChainHeight {
return nil, errors.Errorf("start chain height (%d) is past end chain height (%d)",
startChainHeight, endChainHeight)
if startHeight > endHeight {
return nil, fmt.Errorf("start height (%d) is past end height (%d)",
startHeight, endHeight)
}
resultsLength := int(endChainHeight - startChainHeight + 1)
resultsLength := int(endHeight - startHeight + 1)
if resultsLength > maxResults {
return nil, errors.Errorf("number of results (%d) would exceed max (%d)",
return nil, fmt.Errorf("number of results (%d) would exceed max (%d)",
resultsLength, maxResults)
}
// Walk backwards from endChainHeight to startChainHeight, collecting block hashes.
// Walk backwards from endHeight to startHeight, collecting block hashes.
node := endNode
hashes := make([]*daghash.Hash, resultsLength)
for i := resultsLength - 1; i >= 0; i-- {
@@ -1649,21 +1621,21 @@ func (dag *BlockDAG) IntervalBlockHashes(endHash *daghash.Hash, interval uint64,
endNode := dag.index.LookupNode(endHash)
if endNode == nil {
return nil, errors.Errorf("no known block header with hash %s", endHash)
return nil, fmt.Errorf("no known block header with hash %s", endHash)
}
if !dag.index.NodeStatus(endNode).KnownValid() {
return nil, errors.Errorf("block %s is not yet validated", endHash)
return nil, fmt.Errorf("block %s is not yet validated", endHash)
}
endChainHeight := endNode.chainHeight
endHeight := endNode.height
resultsLength := endChainHeight / interval
resultsLength := endHeight / interval
hashes := make([]*daghash.Hash, resultsLength)
dag.virtual.mtx.Lock()
defer dag.virtual.mtx.Unlock()
blockNode := endNode
for index := endChainHeight / interval; index > 0; index-- {
for index := endHeight / interval; index > 0; index-- {
blockHeight := index * interval
blockNode = blockNode.SelectedAncestor(blockHeight)
@@ -1678,26 +1650,23 @@ func (dag *BlockDAG) IntervalBlockHashes(endHash *daghash.Hash, interval uint64,
// provided max number of block hashes.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) getBlueBlocksHashesBetween(startHash, stopHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) {
nodes, err := dag.getBlueBlocksBetween(startHash, stopHash, maxHashes)
if err != nil {
return nil, err
}
func (dag *BlockDAG) getBlueBlocksHashesBetween(startHash, stopHash *daghash.Hash, maxHashes uint64) []*daghash.Hash {
nodes := dag.getBlueBlocksBetween(startHash, stopHash, maxHashes)
hashes := make([]*daghash.Hash, len(nodes))
for i, node := range nodes {
hashes[i] = node.hash
}
return hashes, nil
return hashes
}
func (dag *BlockDAG) getBlueBlocksBetween(startHash, stopHash *daghash.Hash, maxEntries uint64) ([]*blockNode, error) {
func (dag *BlockDAG) getBlueBlocksBetween(startHash, stopHash *daghash.Hash, maxEntries uint64) []*blockNode {
startNode := dag.index.LookupNode(startHash)
if startNode == nil {
return nil, errors.Errorf("Couldn't find start hash %s", startHash)
return nil
}
stopNode := dag.index.LookupNode(stopHash)
if stopNode == nil {
return nil, errors.Errorf("Couldn't find stop hash %s", stopHash)
stopNode = dag.selectedTip()
}
// In order to get no more then maxEntries of blue blocks from
@@ -1718,22 +1687,16 @@ func (dag *BlockDAG) getBlueBlocksBetween(startHash, stopHash *daghash.Hash, max
// Populate and return the found nodes.
nodes := make([]*blockNode, 0, stopNode.blueScore-startNode.blueScore+1)
nodes = append(nodes, stopNode)
current := stopNode
for current.blueScore > startNode.blueScore {
for current := stopNode; current != startNode; current = current.selectedParent {
for _, blue := range current.blues {
nodes = append(nodes, blue)
}
current = current.selectedParent
}
if current != startNode {
return nil, errors.Errorf("the start hash is not found in the " +
"selected parent chain of the stop hash")
}
reversedNodes := make([]*blockNode, len(nodes))
for i, node := range nodes {
reversedNodes[len(reversedNodes)-i-1] = node
}
return reversedNodes, nil
return reversedNodes
}
// GetBlueBlocksHashesBetween returns the hashes of the blue blocks after the
@@ -1741,14 +1704,11 @@ func (dag *BlockDAG) getBlueBlocksBetween(startHash, stopHash *daghash.Hash, max
// provided max number of block hashes.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) GetBlueBlocksHashesBetween(startHash, stopHash *daghash.Hash, maxHashes uint64) ([]*daghash.Hash, error) {
func (dag *BlockDAG) GetBlueBlocksHashesBetween(startHash, stopHash *daghash.Hash, maxHashes uint64) []*daghash.Hash {
dag.dagLock.RLock()
hashes, err := dag.getBlueBlocksHashesBetween(startHash, stopHash, maxHashes)
if err != nil {
return nil, err
}
hashes := dag.getBlueBlocksHashesBetween(startHash, stopHash, maxHashes)
dag.dagLock.RUnlock()
return hashes, nil
return hashes
}
// getBlueBlocksHeadersBetween returns the headers of the blue blocks after the
@@ -1756,16 +1716,13 @@ func (dag *BlockDAG) GetBlueBlocksHashesBetween(startHash, stopHash *daghash.Has
// provided max number of block headers.
//
// This function MUST be called with the DAG state lock held (for reads).
func (dag *BlockDAG) getBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash, maxHeaders uint64) ([]*wire.BlockHeader, error) {
nodes, err := dag.getBlueBlocksBetween(startHash, stopHash, maxHeaders)
if err != nil {
return nil, err
}
func (dag *BlockDAG) getBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash, maxHeaders uint64) []*wire.BlockHeader {
nodes := dag.getBlueBlocksBetween(startHash, stopHash, maxHeaders)
headers := make([]*wire.BlockHeader, len(nodes))
for i, node := range nodes {
headers[i] = node.Header()
}
return headers, nil
return headers
}
// GetTopHeaders returns the top wire.MaxBlockHeadersPerMsg block headers ordered by height.
@@ -1774,7 +1731,7 @@ func (dag *BlockDAG) GetTopHeaders(startHash *daghash.Hash) ([]*wire.BlockHeader
if startHash != nil {
startNode = dag.index.LookupNode(startHash)
if startNode == nil {
return nil, errors.Errorf("Couldn't find the start hash %s in the dag", startHash)
return nil, fmt.Errorf("Couldn't find the start hash %s in the dag", startHash)
}
}
headers := make([]*wire.BlockHeader, 0, startNode.blueScore)
@@ -1794,16 +1751,6 @@ func (dag *BlockDAG) GetTopHeaders(startHash *daghash.Hash) ([]*wire.BlockHeader
return headers, nil
}
// Lock locks the DAG's UTXO set for writing.
func (dag *BlockDAG) Lock() {
dag.dagLock.Lock()
}
// Unlock unlocks the DAG's UTXO set for writing.
func (dag *BlockDAG) Unlock() {
dag.dagLock.Unlock()
}
// RLock locks the DAG's UTXO set for reading.
func (dag *BlockDAG) RLock() {
dag.dagLock.RLock()
@@ -1819,14 +1766,11 @@ func (dag *BlockDAG) RUnlock() {
// provided max number of block headers.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) GetBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash) ([]*wire.BlockHeader, error) {
func (dag *BlockDAG) GetBlueBlocksHeadersBetween(startHash, stopHash *daghash.Hash) []*wire.BlockHeader {
dag.dagLock.RLock()
headers, err := dag.getBlueBlocksHeadersBetween(startHash, stopHash, wire.MaxBlockHeadersPerMsg)
if err != nil {
return nil, err
}
headers := dag.getBlueBlocksHeadersBetween(startHash, stopHash, wire.MaxBlockHeadersPerMsg)
dag.dagLock.RUnlock()
return headers, nil
return headers
}
// SubnetworkID returns the node's subnetwork ID

View File

@@ -5,8 +5,8 @@
package blockdag
import (
"errors"
"fmt"
"github.com/pkg/errors"
"os"
"path/filepath"
"reflect"
@@ -18,13 +18,13 @@ import (
"math/rand"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/wire"
)
func TestBlockCount(t *testing.T) {
@@ -594,10 +594,10 @@ func testTip(nodes []*blockNode) *blockNode {
return nodes[len(nodes)-1]
}
// TestChainHeightToHashRange ensures that fetching a range of block hashes by start
// chain height and end hash works as expected.
func TestChainHeightToHashRange(t *testing.T) {
// Construct a synthetic block DAG with a block index consisting of
// TestHeightToHashRange ensures that fetching a range of block hashes by start
// height and end hash works as expected.
func TestHeightToHashRange(t *testing.T) {
// Construct a synthetic block chain with a block index consisting of
// the following structure.
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
// \-> 16a -> 17a -> 18a (unvalidated)
@@ -610,7 +610,7 @@ func TestChainHeightToHashRange(t *testing.T) {
blockDAG.index.AddNode(node)
}
for _, node := range branch1Nodes {
if node.chainHeight < 18 {
if node.height < 18 {
blockDAG.index.SetStatusFlags(node, statusValid)
}
blockDAG.index.AddNode(node)
@@ -618,59 +618,59 @@ func TestChainHeightToHashRange(t *testing.T) {
blockDAG.virtual.SetTips(setFromSlice(tip(branch0Nodes)))
tests := []struct {
name string
startChainHeight uint64 // locator for requested inventory
endHash *daghash.Hash // stop hash for locator
maxResults int // max to locate, 0 = wire const
hashes []*daghash.Hash // expected located hashes
expectError bool
name string
startHeight uint64 // locator for requested inventory
endHash *daghash.Hash // stop hash for locator
maxResults int // max to locate, 0 = wire const
hashes []*daghash.Hash // expected located hashes
expectError bool
}{
{
name: "blocks below tip",
startChainHeight: 11,
endHash: branch0Nodes[14].hash,
maxResults: 10,
hashes: nodeHashes(branch0Nodes, 10, 11, 12, 13, 14),
name: "blocks below tip",
startHeight: 11,
endHash: branch0Nodes[14].hash,
maxResults: 10,
hashes: nodeHashes(branch0Nodes, 10, 11, 12, 13, 14),
},
{
name: "blocks on main chain",
startChainHeight: 15,
endHash: branch0Nodes[17].hash,
maxResults: 10,
hashes: nodeHashes(branch0Nodes, 14, 15, 16, 17),
name: "blocks on main chain",
startHeight: 15,
endHash: branch0Nodes[17].hash,
maxResults: 10,
hashes: nodeHashes(branch0Nodes, 14, 15, 16, 17),
},
{
name: "blocks on stale chain",
startChainHeight: 15,
endHash: branch1Nodes[1].hash,
maxResults: 10,
name: "blocks on stale chain",
startHeight: 15,
endHash: branch1Nodes[1].hash,
maxResults: 10,
hashes: append(nodeHashes(branch0Nodes, 14),
nodeHashes(branch1Nodes, 0, 1)...),
},
{
name: "invalid start chain height",
startChainHeight: 19,
endHash: branch0Nodes[17].hash,
maxResults: 10,
expectError: true,
name: "invalid start height",
startHeight: 19,
endHash: branch0Nodes[17].hash,
maxResults: 10,
expectError: true,
},
{
name: "too many results",
startChainHeight: 1,
endHash: branch0Nodes[17].hash,
maxResults: 10,
expectError: true,
name: "too many results",
startHeight: 1,
endHash: branch0Nodes[17].hash,
maxResults: 10,
expectError: true,
},
{
name: "unvalidated block",
startChainHeight: 15,
endHash: branch1Nodes[2].hash,
maxResults: 10,
expectError: true,
name: "unvalidated block",
startHeight: 15,
endHash: branch1Nodes[2].hash,
maxResults: 10,
expectError: true,
},
}
for _, test := range tests {
hashes, err := blockDAG.ChainHeightToHashRange(test.startChainHeight, test.endHash,
hashes, err := blockDAG.HeightToHashRange(test.startHeight, test.endHash,
test.maxResults)
if err != nil {
if !test.expectError {
@@ -702,7 +702,7 @@ func TestIntervalBlockHashes(t *testing.T) {
dag.index.AddNode(node)
}
for _, node := range branch1Nodes {
if node.chainHeight < 18 {
if node.height < 18 {
dag.index.SetStatusFlags(node, statusValid)
}
dag.index.AddNode(node)
@@ -1042,9 +1042,13 @@ func TestConfirmations(t *testing.T) {
if err != nil {
t.Fatalf("TestConfirmations: confirmations for tip unexpectedly failed: %s", err)
}
expectedConfirmations := uint64(0)
if tip == dag.selectedTip() {
expectedConfirmations = 1
acceptingBlock, err := dag.acceptingBlock(tip)
if err != nil {
t.Fatalf("TestConfirmations: dag.acceptingBlock unexpectedly failed: %s", err)
}
expectedConfirmations := uint64(1)
if acceptingBlock == nil {
expectedConfirmations = 0
}
if tipConfirmations != expectedConfirmations {
t.Fatalf("TestConfirmations: unexpected confirmations for tip. "+
@@ -1086,7 +1090,7 @@ func TestConfirmations(t *testing.T) {
func TestAcceptingBlock(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimNetParams
params.K = 3
params.K = 1
dag, teardownFunc, err := DAGSetup("TestAcceptingBlock", Config{
DAGParams: &params,
})
@@ -1101,16 +1105,15 @@ func TestAcceptingBlock(t *testing.T) {
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for genesis block unexpectedly failed: %s", err)
}
if genesisAcceptingBlock != nil {
if genesisAcceptingBlock != &dag.virtual.blockNode {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for genesis block. "+
"Want: nil, got: %s", genesisAcceptingBlock.hash)
"Want: virtual, got: %s", genesisAcceptingBlock.hash)
}
numChainBlocks := uint32(10)
chainBlocks := make([]*blockNode, numChainBlocks)
chainBlocks := make([]*blockNode, 5)
chainBlocks[0] = dag.genesis
buildNode := buildNodeGenerator(dag.dagParams.K, true)
for i := uint32(1); i <= numChainBlocks-1; i++ {
for i := uint32(1); i <= 4; i++ {
chainBlocks[i] = buildNode(setFromSlice(chainBlocks[i-1]))
dag.virtual.AddTip(chainBlocks[i])
}
@@ -1128,65 +1131,16 @@ func TestAcceptingBlock(t *testing.T) {
}
}
// Make sure that the selected tip doesn't have an accepting
tipAcceptingBlock, err := dag.acceptingBlock(chainBlocks[len(chainBlocks)-1])
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for tip unexpectedly failed: %s", err)
}
if tipAcceptingBlock != nil {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for tip. "+
"Want: nil, got: %s", tipAcceptingBlock.hash)
}
// Generate side-chain of dag.dagParams.K + 1 blocks so its tip
// will be in the blues of the virtual but in the anticone of
// the selected tip.
branchingChainTip := chainBlocks[len(chainBlocks)-2]
for i := uint32(0); i < dag.dagParams.K+1; i++ {
nextBranchingChainTip := buildNode(setFromSlice(branchingChainTip))
dag.virtual.AddTip(nextBranchingChainTip)
branchingChainTip = nextBranchingChainTip
}
// Make sure that branchingChainTip is not in the selected parent chain
if dag.IsInSelectedParentChain(branchingChainTip.hash) {
t.Fatalf("TestAcceptingBlock: branchingChainTip wasn't expected to be in the selected parent chain")
}
// Make sure that branchingChainTip is in the virtual blues
isVirtualBlue := false
for _, virtualBlue := range dag.virtual.blues {
if branchingChainTip == virtualBlue {
isVirtualBlue = true
break
}
}
if !isVirtualBlue {
t.Fatalf("TestAcceptingBlock: redChainBlock was expected to be a virtual blue")
}
// Make sure that a block that is in the anticone of the selected tip and
// in the blues of the virtual doesn't have an accepting block
branchingChainTipAcceptionBlock, err := dag.acceptingBlock(branchingChainTip)
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for red chain block unexpectedly failed: %s", err)
}
if branchingChainTipAcceptionBlock != nil {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for branchingChainTipAcceptionBlock. "+
"Want: nil, got: %s", branchingChainTipAcceptionBlock.hash)
}
// Add K + 1 branching blocks
intersectionBlock := chainBlocks[1]
sideChainTip := buildNode(setFromSlice(intersectionBlock))
i := uint32(0)
for ; i < dag.dagParams.K+1; sideChainTip = buildNode(setFromSlice(sideChainTip)) {
dag.virtual.AddTip(sideChainTip)
i++
}
branchingBlocks := make([]*blockNode, 2)
// Add two branching blocks
branchingBlocks[0] = buildNode(setFromSlice(chainBlocks[1]))
dag.virtual.AddTip(branchingBlocks[0])
branchingBlocks[1] = buildNode(setFromSlice(branchingBlocks[0]))
dag.virtual.AddTip(branchingBlocks[1])
// Make sure that the accepting block of the parent of the branching block didn't change
expectedAcceptingBlock := chainBlocks[2]
intersectionBlock := chainBlocks[1]
intersectionAcceptingBlock, err := dag.acceptingBlock(intersectionBlock)
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for intersection block unexpectedly failed: %s", err)
@@ -1196,18 +1150,44 @@ func TestAcceptingBlock(t *testing.T) {
"Want: %s, got: %s", expectedAcceptingBlock.hash, intersectionAcceptingBlock.hash)
}
// Make sure that a block that is found in the red set of the selected tip
// doesn't have an accepting block
newTip := buildNode(setFromSlice(sideChainTip, chainBlocks[len(chainBlocks)-1]))
dag.virtual.AddTip(newTip)
sideChainTipAcceptingBlock, err := dag.acceptingBlock(sideChainTip)
// Make sure that the accepting block of the chain tips is the virtual block
tipAcceptingBlock, err := dag.acceptingBlock(chainBlocks[len(chainBlocks)-1])
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for sideChainTip unexpectedly failed: %s", err)
t.Fatalf("TestAcceptingBlock: acceptingBlock for tip unexpectedly failed: %s", err)
}
if sideChainTipAcceptingBlock != nil {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for sideChainTip. "+
"Want: nil, got: %s", intersectionAcceptingBlock.hash)
if tipAcceptingBlock != &dag.virtual.blockNode {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for tip. "+
"Want: Virtual, got: %s", tipAcceptingBlock.hash)
}
// Generate 100 blocks to force the "main" chain to become red
branchingChainTip := branchingBlocks[1]
for i := uint32(0); i < 100; i++ {
nextBranchingChainTip := buildNode(setFromSlice(branchingChainTip))
dag.virtual.AddTip(nextBranchingChainTip)
branchingChainTip = nextBranchingChainTip
}
// Make sure that a red block returns nil
redChainBlock := chainBlocks[2]
redChainBlockAcceptionBlock, err := dag.acceptingBlock(redChainBlock)
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for red chain block unexpectedly failed: %s", err)
}
if redChainBlockAcceptionBlock != nil {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for red chain block. "+
"Want: nil, got: %s", redChainBlockAcceptionBlock.hash)
}
// Make sure that a red tip returns nil
redChainTip := chainBlocks[len(chainBlocks)-1]
redChainTipAcceptingBlock, err := dag.acceptingBlock(redChainTip)
if err != nil {
t.Fatalf("TestAcceptingBlock: acceptingBlock for red chain tip unexpectedly failed: %s", err)
}
if redChainTipAcceptingBlock != nil {
t.Fatalf("TestAcceptingBlock: unexpected acceptingBlock for red tip block. "+
"Want: nil, got: %s", redChainTipAcceptingBlock.hash)
}
}
@@ -1254,22 +1234,22 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
flushUTXODiffStore()
return node
}
finalityInterval := dag.dagParams.FinalityInterval
nodes := make([]*blockNode, 0, finalityInterval)
nodes := make([]*blockNode, 0, FinalityInterval)
currentNode := dag.genesis
nodes = append(nodes, currentNode)
for i := 0; i <= finalityInterval*2; i++ {
for i := 0; i <= FinalityInterval*2; i++ {
currentNode = addNode(currentNode)
nodes = append(nodes, currentNode)
}
// Manually set the last finality point
dag.lastFinalityPoint = nodes[finalityInterval-1]
dag.lastFinalityPoint = nodes[FinalityInterval-1]
dag.finalizeNodesBelowFinalityPoint(deleteDiffData)
flushUTXODiffStore()
for _, node := range nodes[:finalityInterval-1] {
for _, node := range nodes[:FinalityInterval-1] {
if !node.isFinalized {
t.Errorf("Node with blue score %d expected to be finalized", node.blueScore)
}
@@ -1287,7 +1267,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) {
}
}
for _, node := range nodes[finalityInterval-1:] {
for _, node := range nodes[FinalityInterval-1:] {
if node.isFinalized {
t.Errorf("Node with blue score %d wasn't expected to be finalized", node.blueScore)
}

View File

@@ -9,16 +9,15 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io"
"sync"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/binaryserializer"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/binaryserializer"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/daglabs/btcd/wire"
)
const (
@@ -419,7 +418,7 @@ func (dag *BlockDAG) initDAGState() error {
localSubnetworkID.SetBytes(localSubnetworkIDBytes)
}
if !localSubnetworkID.IsEqual(dag.subnetworkID) {
return errors.Errorf("Cannot start btcd with subnetwork ID %s because"+
return fmt.Errorf("Cannot start btcd with subnetwork ID %s because"+
" its database is already built with subnetwork ID %s. If you"+
" want to switch to a new database, please reset the"+
" database by starting btcd with --reset-db flag", dag.subnetworkID, localSubnetworkID)
@@ -684,6 +683,7 @@ func (dag *BlockDAG) deserializeBlockNode(blockRow []byte) (*blockNode, error) {
node.blues[i] = dag.index.LookupNode(hash)
}
node.height = calculateNodeHeight(node)
node.chainHeight = calculateChainHeight(node)
return node, nil
@@ -821,7 +821,7 @@ func (dag *BlockDAG) BlockHashesFrom(startHash *daghash.Hash, limit int) ([]*dag
blockHashes = append(blockHashes, dag.genesis.hash)
}
if !dag.BlockExists(startHash) {
return nil, errors.Errorf("block %s not found", startHash)
return nil, fmt.Errorf("block %s not found", startHash)
}
blueScore, err := dag.BlueScoreByBlockHash(startHash)
if err != nil {

View File

@@ -6,12 +6,12 @@ package blockdag
import (
"bytes"
"github.com/pkg/errors"
"errors"
"reflect"
"testing"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util/daghash"
)
// TestErrNotInDAG ensures the functions related to errNotInDAG work

View File

@@ -8,7 +8,7 @@ import (
"math/big"
"time"
"github.com/kaspanet/kaspad/util"
"github.com/daglabs/btcd/util"
)
// requiredDifficulty calculates the required difficulty for a

View File

@@ -5,14 +5,14 @@
package blockdag
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"math/big"
"testing"
"time"
"github.com/kaspanet/kaspad/util"
"github.com/daglabs/btcd/util"
)
// TestBigToCompact ensures BigToCompact converts big integers to the expected

View File

@@ -2,32 +2,31 @@ package blockdag_test
import (
"fmt"
"github.com/pkg/errors"
"math"
"testing"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/testtools"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/testtools"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
// TestFinality checks that the finality mechanism works as expected.
// This is how the flow goes:
// 1) We build a chain of params.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * params.FinalityInterval
// 1) We build a chain of blockdag.FinalityInterval blocks and call its tip altChainTip.
// 2) We build another chain (let's call it mainChain) of 2 * blockdag.FinalityInterval
// blocks, which points to genesis, and then we check that the block in that
// chain with height of params.FinalityInterval is marked as finality point (This is
// chain with height of blockdag.FinalityInterval is marked as finality point (This is
// very predictable, because the blue score of each new block in a chain is the
// parents plus one).
// 3) We make a new child to block with height (2 * params.FinalityInterval - 1)
// 3) We make a new child to block with height (2 * blockdag.FinalityInterval - 1)
// in mainChain, and we check that connecting it to the DAG
// doesn't affect the last finality point.
// 4) We make a block that points to genesis, and check that it
@@ -39,7 +38,6 @@ import (
func TestFinality(t *testing.T) {
params := dagconfig.SimNetParams
params.K = 1
params.FinalityInterval = 100
dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", blockdag.Config{
DAGParams: &params,
})
@@ -59,11 +57,11 @@ func TestFinality(t *testing.T) {
return nil, err
}
if delay != 0 {
return nil, errors.Errorf("ProcessBlock: block " +
return nil, fmt.Errorf("ProcessBlock: block " +
"is too far in the future")
}
if isOrphan {
return nil, errors.Errorf("ProcessBlock: unexpected returned orphan block")
return nil, fmt.Errorf("ProcessBlock: unexpected returned orphan block")
}
return block, nil
@@ -72,8 +70,8 @@ func TestFinality(t *testing.T) {
genesis := util.NewBlock(params.GenesisBlock)
currentNode := genesis
// First we build a chain of params.FinalityInterval blocks for future use
for i := 0; i < params.FinalityInterval; i++ {
// First we build a chain of blockdag.FinalityInterval blocks for future use
for i := 0; i < blockdag.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -82,10 +80,10 @@ func TestFinality(t *testing.T) {
altChainTip := currentNode
// Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * params.FinalityInterval to be the last finality point
// Now we build a new chain of 2 * blockdag.FinalityInterval blocks, pointed to genesis, and
// we expect the block with height 1 * blockdag.FinalityInterval to be the last finality point
currentNode = genesis
for i := 0; i < params.FinalityInterval; i++ {
for i := 0; i < blockdag.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -94,7 +92,7 @@ func TestFinality(t *testing.T) {
expectedFinalityPoint := currentNode
for i := 0; i < params.FinalityInterval; i++ {
for i := 0; i < blockdag.FinalityInterval; i++ {
currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()})
if err != nil {
t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err)
@@ -167,9 +165,8 @@ func TestFinality(t *testing.T) {
// a getblocks message it should always be able to send
// all the necessary invs.
func TestFinalityInterval(t *testing.T) {
params := dagconfig.SimNetParams
if params.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("dagconfig.SimNetParams.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
if blockdag.FinalityInterval > wire.MaxInvPerMsg {
t.Errorf("blockdag.FinalityInterval should be lower or equal to wire.MaxInvPerMsg")
}
}
@@ -314,78 +311,6 @@ func TestChainedTransactions(t *testing.T) {
}
}
// TestOrderInDiffFromAcceptanceData makes sure that the order of transactions in
// dag.diffFromAcceptanceData is such that if txA is spent by txB then txA is processed
// before txB.
func TestOrderInDiffFromAcceptanceData(t *testing.T) {
// Create a new database and DAG instance to run tests against.
params := dagconfig.SimNetParams
params.K = math.MaxUint32
dag, teardownFunc, err := blockdag.DAGSetup("TestOrderInDiffFromAcceptanceData", blockdag.Config{
DAGParams: &params,
})
if err != nil {
t.Fatalf("Failed to setup DAG instance: %v", err)
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)
createBlock := func(previousBlock *util.Block) *util.Block {
// Prepare a transaction that spends the previous block's coinbase transaction
var txs []*wire.MsgTx
if !previousBlock.IsGenesis() {
previousCoinbaseTx := previousBlock.MsgBlock().Transactions[0]
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to build signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{TxID: *previousCoinbaseTx.TxID(), Index: 0},
SignatureScript: signatureScript,
Sequence: wire.MaxTxInSequenceNum,
}
txOut := &wire.TxOut{
ScriptPubKey: blockdag.OpTrueScript,
Value: uint64(1),
}
txs = append(txs, wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut}))
}
// Create the block
msgBlock, err := mining.PrepareBlockForTest(dag, &params, []*daghash.Hash{previousBlock.Hash()}, txs, false)
if err != nil {
t.Fatalf("TestOrderInDiffFromAcceptanceData: Failed to prepare block: %s", err)
}
// Add the block to the DAG
newBlock := util.NewBlock(msgBlock)
isOrphan, delay, err := dag.ProcessBlock(newBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Errorf("TestOrderInDiffFromAcceptanceData: %s", err)
}
if delay != 0 {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block is too far in the future")
}
if isOrphan {
t.Fatalf("TestOrderInDiffFromAcceptanceData: block got unexpectedly orphaned")
}
return newBlock
}
// Create two block chains starting from the genesis block. Every time a block is added
// one of the chains is selected as the selected parent chain while all the blocks in
// the other chain (and their transactions) get accepted by the new virtual. If the
// transactions in the non-selected parent chain get processed in the wrong order then
// diffFromAcceptanceData panics.
blockAmountPerChain := 100
chainATip := util.NewBlock(params.GenesisBlock)
chainBTip := chainATip
for i := 0; i < blockAmountPerChain; i++ {
chainATip = createBlock(chainATip)
chainBTip = createBlock(chainBTip)
}
}
// TestGasLimit tests the gas limit rules
func TestGasLimit(t *testing.T) {
params := dagconfig.SimNetParams

View File

@@ -7,20 +7,20 @@ package blockdag_test
import (
"bytes"
"github.com/pkg/errors"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/fullblocktests"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
_ "github.com/kaspanet/kaspad/database/ffldb"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/blockdag/fullblocktests"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
@@ -62,7 +62,7 @@ func isSupportedDbType(dbType string) bool {
// a teardown function the caller should invoke when done testing to clean up.
func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func(), error) {
if !isSupportedDbType(testDbType) {
return nil, nil, errors.Errorf("unsupported db type %v", testDbType)
return nil, nil, fmt.Errorf("unsupported db type %v", testDbType)
}
// Handle memory database specially since it doesn't need the disk
@@ -72,7 +72,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
if testDbType == "memdb" {
ndb, err := database.Create(testDbType)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %v", err)
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb
@@ -85,7 +85,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := errors.Errorf("unable to create test db "+
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
@@ -96,7 +96,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
_ = os.RemoveAll(dbPath)
ndb, err := database.Create(testDbType, dbPath, blockDataNet)
if err != nil {
return nil, nil, errors.Errorf("error creating db: %v", err)
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
db = ndb
@@ -123,7 +123,7 @@ func DAGSetup(dbName string, params *dagconfig.Params) (*blockdag.BlockDAG, func
})
if err != nil {
teardown()
err := errors.Errorf("failed to create chain instance: %v", err)
err := fmt.Errorf("failed to create chain instance: %v", err)
return nil, nil, err
}
return chain, teardown, nil

View File

@@ -3,7 +3,7 @@ fullblocktests
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/kaspanet/kaspad/blockchain/fullblocktests)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/daglabs/btcd/blockchain/fullblocktests)
Package fullblocktests provides a set of full block tests to be used for testing
the consensus validation rules. The tests are intended to be flexible enough to
@@ -20,7 +20,7 @@ of blocks that exercise the consensus validation rules.
## Installation and Updating
```bash
$ go get -u github.com/kaspanet/kaspad/blockchain/fullblocktests
$ go get -u github.com/daglabs/btcd/blockchain/fullblocktests
```
## License

View File

@@ -12,20 +12,20 @@ package fullblocktests
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/pkg/errors"
"math"
"runtime"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/btcec"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/random"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/btcec"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/random"
"github.com/daglabs/btcd/wire"
)
const (

View File

@@ -10,12 +10,12 @@ import (
"math/big"
"time"
"github.com/kaspanet/kaspad/util/hdkeychain"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/daglabs/btcd/util/hdkeychain"
"github.com/daglabs/btcd/util/subnetworkid"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
// newHashFromStr converts the passed big-endian hex string into a

View File

@@ -3,7 +3,7 @@ indexers
[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://godoc.org/github.com/kaspanet/kaspad/blockchain/indexers?status.png)](http://godoc.org/github.com/kaspanet/kaspad/blockchain/indexers)
[![GoDoc](https://godoc.org/github.com/daglabs/btcd/blockchain/indexers?status.png)](http://godoc.org/github.com/daglabs/btcd/blockchain/indexers)
Package indexers implements optional block chain indexes.
@@ -23,7 +23,7 @@ via an RPC interface.
## Installation
```bash
$ go get -u github.com/kaspanet/kaspad/blockchain/indexers
$ go get -u github.com/daglabs/btcd/blockchain/indexers
```
## License

View File

@@ -3,12 +3,12 @@ package indexers
import (
"bytes"
"encoding/gob"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"fmt"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
@@ -156,7 +156,7 @@ func dbFetchTxsAcceptanceDataByID(dbTx database.Tx,
bucket := dbTx.Metadata().Bucket(acceptanceIndexKey)
serializedTxsAcceptanceData := bucket.Get(serializedBlockID)
if serializedTxsAcceptanceData == nil {
return nil, errors.Errorf("no entry in the accpetance index for block id %d", blockID)
return nil, fmt.Errorf("no entry in the accpetance index for block id %d", blockID)
}
return deserializeMultiBlockTxsAcceptanceData(serializedTxsAcceptanceData)
@@ -167,29 +167,23 @@ type serializableTxAcceptanceData struct {
IsAccepted bool
}
type serializableBlockTxsAcceptanceData struct {
BlockHash daghash.Hash
TxAcceptanceData []serializableTxAcceptanceData
}
type serializableBlockTxsAcceptanceData []serializableTxAcceptanceData
type serializableMultiBlockTxsAcceptanceData []serializableBlockTxsAcceptanceData
type serializableMultiBlockTxsAcceptanceData map[daghash.Hash]serializableBlockTxsAcceptanceData
func serializeMultiBlockTxsAcceptanceData(
multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) ([]byte, error) {
txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) ([]byte, error) {
// Convert MultiBlockTxsAcceptanceData to a serializable format
serializableData := make(serializableMultiBlockTxsAcceptanceData, len(multiBlockTxsAcceptanceData))
for i, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
serializableBlockData := serializableBlockTxsAcceptanceData{
BlockHash: blockTxsAcceptanceData.BlockHash,
TxAcceptanceData: make([]serializableTxAcceptanceData, len(blockTxsAcceptanceData.TxAcceptanceData)),
}
for i, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
serializableBlockData.TxAcceptanceData[i] = serializableTxAcceptanceData{
serializableData := make(serializableMultiBlockTxsAcceptanceData, len(txsAcceptanceData))
for hash, blockTxsAcceptanceData := range txsAcceptanceData {
serializableBlockData := make(serializableBlockTxsAcceptanceData, len(blockTxsAcceptanceData))
for i, txAcceptanceData := range blockTxsAcceptanceData {
serializableBlockData[i] = serializableTxAcceptanceData{
MsgTx: *txAcceptanceData.Tx.MsgTx(),
IsAccepted: txAcceptanceData.IsAccepted,
}
}
serializableData[i] = serializableBlockData
serializableData[hash] = serializableBlockData
}
// Serialize
@@ -214,21 +208,18 @@ func deserializeMultiBlockTxsAcceptanceData(
}
// Convert serializable format to MultiBlockTxsAcceptanceData
multiBlockTxsAcceptanceData := make(blockdag.MultiBlockTxsAcceptanceData, len(serializedData))
for i, serializableBlockData := range serializedData {
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
BlockHash: serializableBlockData.BlockHash,
TxAcceptanceData: make([]blockdag.TxAcceptanceData, len(serializableBlockData.TxAcceptanceData)),
}
for i, txData := range serializableBlockData.TxAcceptanceData {
txsAcceptanceData := make(blockdag.MultiBlockTxsAcceptanceData, len(serializedData))
for hash, serializableBlockData := range serializedData {
blockTxsAcceptanceData := make(blockdag.BlockTxsAcceptanceData, len(serializableBlockData))
for i, txData := range serializableBlockData {
msgTx := txData.MsgTx
blockTxsAcceptanceData.TxAcceptanceData[i] = blockdag.TxAcceptanceData{
blockTxsAcceptanceData[i] = blockdag.TxAcceptanceData{
Tx: util.NewTx(&msgTx),
IsAccepted: txData.IsAccepted,
}
}
multiBlockTxsAcceptanceData[i] = blockTxsAcceptanceData
txsAcceptanceData[hash] = blockTxsAcceptanceData
}
return multiBlockTxsAcceptanceData, nil
return txsAcceptanceData, nil
}

View File

@@ -1,13 +1,13 @@
package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"fmt"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
"io"
"io/ioutil"
"os"
@@ -18,6 +18,8 @@ import (
)
func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
txsAcceptanceData := blockdag.MultiBlockTxsAcceptanceData{}
// Create test data
hash, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
txIn1 := &wire.TxIn{SignatureScript: []byte{1}, PreviousOutpoint: wire.Outpoint{Index: 1}, Sequence: 0}
@@ -25,22 +27,19 @@ func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
txOut1 := &wire.TxOut{ScriptPubKey: []byte{1}, Value: 10}
txOut2 := &wire.TxOut{ScriptPubKey: []byte{2}, Value: 20}
blockTxsAcceptanceData := blockdag.BlockTxsAcceptanceData{
BlockHash: *hash,
TxAcceptanceData: []blockdag.TxAcceptanceData{
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
IsAccepted: true,
},
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
IsAccepted: false,
},
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn1}, []*wire.TxOut{txOut1})),
IsAccepted: true,
},
{
Tx: util.NewTx(wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn2}, []*wire.TxOut{txOut2})),
IsAccepted: false,
},
}
multiBlockTxsAcceptanceData := blockdag.MultiBlockTxsAcceptanceData{blockTxsAcceptanceData}
txsAcceptanceData[*hash] = blockTxsAcceptanceData
// Serialize
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(multiBlockTxsAcceptanceData)
serializedTxsAcceptanceData, err := serializeMultiBlockTxsAcceptanceData(txsAcceptanceData)
if err != nil {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: serialization failed: %s", err)
}
@@ -52,7 +51,7 @@ func TestAcceptanceIndexSerializationAndDeserialization(t *testing.T) {
}
// Check that they're the same
if !reflect.DeepEqual(multiBlockTxsAcceptanceData, deserializedTxsAcceptanceData) {
if !reflect.DeepEqual(txsAcceptanceData, deserializedTxsAcceptanceData) {
t.Fatalf("TestAcceptanceIndexSerializationAndDeserialization: original data and deseralize data aren't equal")
}
}
@@ -260,7 +259,7 @@ func copyDirectory(scrDir, dest string) error {
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if !ok {
return errors.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath)
return fmt.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath)
}
switch fileInfo.Mode() & os.ModeType {
@@ -324,7 +323,7 @@ func createIfNotExists(dir string, perm os.FileMode) error {
}
if err := os.MkdirAll(dir, perm); err != nil {
return errors.Errorf("failed to create directory: '%s', error: '%s'", dir, err.Error())
return fmt.Errorf("failed to create directory: '%s', error: '%s'", dir, err.Error())
}
return nil

View File

@@ -5,17 +5,17 @@
package indexers
import (
"errors"
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
@@ -898,7 +898,7 @@ func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
return fmt.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}

View File

@@ -7,10 +7,9 @@ package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/wire"
)
// addrIndexBucket provides a mock address index database bucket by implementing
@@ -128,17 +127,17 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return errors.Errorf("level %d has %d entries",
return fmt.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return errors.Errorf("level %d has %d entries", level,
return fmt.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return errors.Errorf("expected %d entries - got %d", expectedTotal,
return fmt.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
@@ -152,7 +151,7 @@ func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal i
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return errors.Errorf("level %d offset %d does "+
return fmt.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)

View File

@@ -0,0 +1,367 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"errors"
"fmt"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/util/gcs"
"github.com/daglabs/btcd/util/gcs/builder"
"github.com/daglabs/btcd/wire"
)
const (
// cfIndexName is the human-readable name for the index.
cfIndexName = "committed filter index"
)
// Committed filters come in two flavours: basic and extended. They are
// generated and dropped in pairs, and both are indexed by a block's hash.
// Besides holding different content, they also live in different buckets.
var (
	// cfIndexParentBucketKey is the name of the parent bucket used to house
	// the index. The rest of the buckets live below this bucket.
	cfIndexParentBucketKey = []byte("cfindexparentbucket")

	// cfIndexKeys is an array of db bucket names used to house indexes of
	// block hashes to cfilters.
	cfIndexKeys = [][]byte{
		[]byte("cf0byhashidx"),
		[]byte("cf1byhashidx"),
	}

	// cfHeaderKeys is an array of db bucket names used to house indexes of
	// block hashes to cf headers.
	cfHeaderKeys = [][]byte{
		[]byte("cf0headerbyhashidx"),
		[]byte("cf1headerbyhashidx"),
	}

	// cfHashKeys is an array of db bucket names used to house indexes of
	// block hashes to cf hashes.
	cfHashKeys = [][]byte{
		[]byte("cf0hashbyhashidx"),
		[]byte("cf1hashbyhashidx"),
	}

	// maxFilterType is the highest valid filter type value; filter types
	// are used directly as indexes into the three key slices above, so all
	// slices must stay the same length.
	maxFilterType = uint8(len(cfHeaderKeys) - 1)
)
// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// A missing entry is reported as a nil blob, not as an error.
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) ([]byte, error) {
	filterBucket := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return filterBucket.Get(h[:]), nil
}
// dbStoreFilterIdxEntry stores a data blob in the filter index database,
// keyed by the block hash, inside the sub-bucket named by key.
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash, f []byte) error {
	filterBucket := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return filterBucket.Put(h[:], f)
}
// dbDeleteFilterIdxEntry deletes a data blob from the filter index database,
// removing the block hash's entry from the sub-bucket named by key.
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) error {
	filterBucket := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return filterBucket.Delete(h[:])
}
// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
	db        database.DB      // backing database handle, set by Init
	dagParams *dagconfig.Params // network parameters, set by NewCfIndex; not read within this file
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)
// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
//
// Only the database handle is retained; the *blockdag.BlockDAG argument is
// unused by this indexer.
func (idx *CfIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
	idx.db = db
	return nil
}
// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
//
// The parent bucket key doubles as the index's identity in the database.
func (idx *CfIndex) Key() []byte {
	return cfIndexParentBucketKey
}
// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
	return cfIndexName
}
// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates the parent bucket and, below it,
// one bucket per filter type for each of the three entry kinds: filter
// contents, filter headers, and filter hashes.
func (idx *CfIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()
	cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
	if err != nil {
		return err
	}

	// The three bucket families are created identically, so collapse the
	// three duplicated loops into a single pass. The creation order
	// (filters, then headers, then hashes) matches the original unrolled
	// code.
	for _, bucketNames := range [][][]byte{cfIndexKeys, cfHeaderKeys, cfHashKeys} {
		for _, bucketName := range bucketNames {
			if _, err := cfIndexParentBucket.CreateBucket(bucketName); err != nil {
				return err
			}
		}
	}
	return nil
}
// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header: it writes the filter contents, the filter
// hash, and finally the filter header (chained from the parent block's
// header) into the three bucket families for filterType.
func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
	filterType wire.FilterType) error {
	// filterType indexes directly into the key slices, so reject anything
	// beyond the known range.
	if uint8(filterType) > maxFilterType {
		return errors.New("unsupported filter type")
	}
	// Figure out which buckets to use.
	fkey := cfIndexKeys[filterType]
	hkey := cfHeaderKeys[filterType]
	hashkey := cfHashKeys[filterType]
	// Start by storing the filter.
	h := block.Hash()
	filterBytes, err := f.NBytes()
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
	if err != nil {
		return err
	}
	// Next store the filter hash.
	filterHash, err := builder.GetFilterHash(f)
	if err != nil {
		return err
	}
	err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
	if err != nil {
		return err
	}
	// Then fetch the previous block's filter header. The genesis block has
	// no parent, so its previous header is defined as the zero hash.
	var prevHeader *daghash.Hash
	header := block.MsgBlock().Header
	if header.IsGenesis() {
		prevHeader = &daghash.ZeroHash
	} else {
		// TODO(Evgeny): Current implementation of GCS filter inherited from chain
		// (single parent) and must be ported to DAG (multiple parents)
		var parentHash *daghash.Hash
		if header.NumParentBlocks() != 0 {
			parentHash = header.ParentHashes[0]
		}
		// NOTE(review): if NumParentBlocks() == 0 for a non-genesis header,
		// parentHash stays nil and dbFetchFilterIdxEntry will dereference it
		// (h[:]) — confirm non-genesis headers always have parents.
		prevFilterHashBytes, err := dbFetchFilterIdxEntry(dbTx, hkey, parentHash)
		if err != nil {
			return err
		}
		// Construct the new block's filter header, and store it. NewHash
		// also rejects a wrong-length (e.g. absent) parent header blob.
		prevHeader, err = daghash.NewHash(prevFilterHashBytes)
		if err != nil {
			return err
		}
	}
	fh, err := builder.MakeHeaderForFilter(f, prevHeader)
	if err != nil {
		return err
	}
	return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
//
// Both the regular and the extended filter are built and stored; failure at
// any step aborts the whole operation.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ uint64,
	_ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

	basicFilter, err := builder.BuildBasicFilter(block.MsgBlock())
	if err != nil {
		return err
	}
	if err := storeFilter(dbTx, block, basicFilter, wire.GCSFilterRegular); err != nil {
		return err
	}

	extFilter, err := builder.BuildExtFilter(block.MsgBlock())
	if err != nil {
		return err
	}
	return storeFilter(dbTx, block, extFilter, wire.GCSFilterExtended)
}
// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *util.Block,
	_ *blockdag.BlockDAG) error {

	// Every filter type has an entry for this block hash in each of the
	// three bucket families (contents, headers, hashes). Delete them all in
	// a single pass instead of three duplicated loops; the deletion order
	// matches the original unrolled code.
	for _, keys := range [][][]byte{cfIndexKeys, cfHeaderKeys, cfHashKeys} {
		for _, key := range keys {
			if err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash()); err != nil {
				return err
			}
		}
	}
	return nil
}
// entryByBlockHash fetches a filter index entry of a particular type
// (eg. filter, filter header, etc) for a filter type and block hash.
//
// The filterTypeKeys slice selects which bucket family is queried; the
// lookup runs inside a read-only database view.
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
	filterType wire.FilterType, h *daghash.Hash) ([]byte, error) {

	if uint8(filterType) > maxFilterType {
		return nil, errors.New("unsupported filter type")
	}
	bucketKey := filterTypeKeys[filterType]

	var entry []byte
	err := idx.db.View(func(dbTx database.Tx) error {
		fetched, err := dbFetchFilterIdxEntry(dbTx, bucketKey, h)
		if err != nil {
			return err
		}
		entry = fetched
		return nil
	})
	return entry, err
}
// entriesByBlockHashes batch fetches a filter index entry of a particular type
// (eg. filter, filter header, etc) for a filter type and slice of block hashes.
//
// All lookups share one read-only database view; results are returned in the
// same order as blockHashes.
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
	filterType wire.FilterType, blockHashes []*daghash.Hash) ([][]byte, error) {

	if uint8(filterType) > maxFilterType {
		return nil, errors.New("unsupported filter type")
	}
	bucketKey := filterTypeKeys[filterType]

	results := make([][]byte, 0, len(blockHashes))
	err := idx.db.View(func(dbTx database.Tx) error {
		for _, blockHash := range blockHashes {
			entry, err := dbFetchFilterIdxEntry(dbTx, bucketKey, blockHash)
			if err != nil {
				return err
			}
			results = append(results, entry)
		}
		return nil
	})
	return results, err
}
// FilterByBlockHash returns the serialized contents of a block's basic or
// extended committed filter.
//
// Thin wrapper around entryByBlockHash using the filter-contents buckets.
func (idx *CfIndex) FilterByBlockHash(h *daghash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfIndexKeys, filterType, h)
}
// FiltersByBlockHashes returns the serialized contents of a block's basic or
// extended committed filter for a set of blocks by hash.
//
// Thin wrapper around entriesByBlockHashes using the filter-contents buckets.
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*daghash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
}
// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// or extended committed filter header.
//
// Thin wrapper around entryByBlockHash using the filter-header buckets.
func (idx *CfIndex) FilterHeaderByBlockHash(h *daghash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
}
// FilterHeadersByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter header for a set of blocks by hash.
//
// Thin wrapper around entriesByBlockHashes using the filter-header buckets.
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*daghash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
}
// FilterHashByBlockHash returns the serialized contents of a block's basic
// or extended committed filter hash.
//
// Thin wrapper around entryByBlockHash using the filter-hash buckets.
func (idx *CfIndex) FilterHashByBlockHash(h *daghash.Hash,
	filterType wire.FilterType) ([]byte, error) {
	return idx.entryByBlockHash(cfHashKeys, filterType, h)
}
// FilterHashesByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter hash for a set of blocks by hash.
//
// Thin wrapper around entriesByBlockHashes using the filter-hash buckets.
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
	filterType wire.FilterType) ([][]byte, error) {
	return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
//
// The cf index has no recovery path, so this always fails with a message
// telling the operator to drop and rebuild the index.
func (idx *CfIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
	missedBlocks := lastKnownBlockID - currentBlockID
	return fmt.Errorf("cfindex was turned off for %d blocks and can't be recovered."+
		" To resume working drop the cfindex with --dropcfindex", missedBlocks)
}
// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
//
// The database handle is supplied later via Init.
func NewCfIndex(dagParams *dagconfig.Params) *CfIndex {
	return &CfIndex{dagParams: dagParams}
}
// DropCfIndex drops the CF index from the provided database if exists.
// The interrupt channel allows the potentially long-running drop to be
// aborted early.
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}

View File

@@ -9,10 +9,11 @@ package indexers
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"errors"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
)
var (

View File

@@ -5,9 +5,9 @@
package indexers
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.INDX)
var spawn = panics.GoroutineWrapperFunc(log)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

View File

@@ -5,10 +5,10 @@
package indexers
import (
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
var (

View File

@@ -6,12 +6,11 @@ package indexers
import (
"fmt"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
const (
@@ -153,7 +152,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.Block
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, txsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
@@ -179,12 +178,12 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, mu
includingBlocksOffset += includingBlocksIndexKeyEntrySize
}
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
for includingBlockHash, blockTxsAcceptanceData := range txsAcceptanceData {
var includingBlockID uint64
if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
if includingBlockHash.IsEqual(block.Hash()) {
includingBlockID = blockID
} else {
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &includingBlockHash)
if err != nil {
return err
}
@@ -192,7 +191,7 @@ func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, mu
serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData {
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
if err != nil {
return err
@@ -207,13 +206,13 @@ func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsA
// Initialize a new txsAcceptedByVirtual
entries := 0
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
entries += len(blockTxsAcceptanceData.TxAcceptanceData)
entries += len(blockTxsAcceptanceData)
}
txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)
// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData {
txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
}
}
@@ -371,7 +370,11 @@ func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
if bucket == nil {
return nil, nil
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No accepting blocks bucket "+
"exists for %s", txID),
}
}
cursor := bucket.Cursor()
if !cursor.First() {
@@ -426,6 +429,6 @@ func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
return fmt.Errorf("txindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}

View File

@@ -5,13 +5,13 @@ import (
"reflect"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/blockdag"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/mining"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
"github.com/daglabs/btcd/wire"
)
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {

View File

@@ -5,9 +5,9 @@
package blockdag
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
"github.com/daglabs/btcd/logger"
"github.com/daglabs/btcd/util/panics"
)
var log, _ = logger.Get(logger.SubsystemTags.BDAG)
var spawn = panics.GoroutineWrapperFunc(log)
var spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)

View File

@@ -7,8 +7,8 @@ package blockdag
import (
"math"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// MerkleTree holds the hashes of a merkle tree

View File

@@ -5,10 +5,10 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util/daghash"
"testing"
"github.com/kaspanet/kaspad/util"
"github.com/daglabs/btcd/util"
)
// TestMerkle tests the BuildHashMerkleTreeStore API.

View File

@@ -6,8 +6,8 @@ package blockdag
import (
"fmt"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// NotificationType represents the type of a notification message.

View File

@@ -8,7 +8,7 @@ import (
"path/filepath"
"testing"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/daglabs/btcd/dagconfig"
)
// TestNotifications ensures that notification callbacks are fired on events.

View File

@@ -1,7 +1,7 @@
package blockdag
import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util/daghash"
)
// phantom calculates and returns the block's blue set, selected parent and blue score.

View File

@@ -7,9 +7,9 @@ import (
"testing"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util/daghash"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/daglabs/btcd/dagconfig"
)
type testBlockData struct {

View File

@@ -8,8 +8,8 @@ import (
"fmt"
"time"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/util/daghash"
)
// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
@@ -112,12 +112,7 @@ func (dag *BlockDAG) processOrphans(hash *daghash.Hash, flags BehaviorFlags) err
// Potentially accept the block into the block DAG.
err = dag.maybeAcceptBlock(orphan.block, flags|BFWasUnorphaned)
if err != nil {
// Since we don't want to reject the original block because of
// a bad unorphaned child, only return an error if it's not a RuleError.
if _, ok := err.(RuleError); !ok {
return err
}
log.Warnf("Verification failed for orphan block %s: %s", orphanHash, err)
return err
}
// Add this block to the list of blocks to process so

View File

@@ -3,10 +3,8 @@ package blockdag
import (
"bou.ke/monkey"
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"path/filepath"
"github.com/daglabs/btcd/dagconfig"
"github.com/daglabs/btcd/util"
"testing"
"time"
)
@@ -67,65 +65,3 @@ func TestProcessBlock(t *testing.T) {
t.Errorf("ProcessBlock: Expected error \"%s\" but got \"%s\"", expectedErrMsg, err)
}
}
func TestProcessOrphans(t *testing.T) {
dag, teardownFunc, err := DAGSetup("TestProcessOrphans", Config{
DAGParams: &dagconfig.SimNetParams,
})
if err != nil {
t.Errorf("Failed to setup dag instance: %v", err)
return
}
defer teardownFunc()
dag.TestSetCoinbaseMaturity(0)
blocksFile := "blk_0_to_4.dat"
blocks, err := LoadBlocks(filepath.Join("testdata/", blocksFile))
if err != nil {
t.Fatalf("TestProcessOrphans: "+
"Error loading file '%s': %s\n", blocksFile, err)
}
// Get a reference to a parent block
parentBlock := blocks[1]
// Get a reference to a child block and mess with it so that:
// a. It gets added to the orphan pool
// b. It gets rejected once it's unorphaned
childBlock := blocks[2]
childBlock.MsgBlock().Header.UTXOCommitment = &daghash.ZeroHash
// Process the child block so that it gets added to the orphan pool
isOrphan, delay, err := dag.ProcessBlock(childBlock, BFNoPoWCheck)
if err != nil {
t.Fatalf("TestProcessOrphans: child block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestProcessOrphans: child block is too far in the future")
}
if !isOrphan {
t.Fatalf("TestProcessOrphans: incorrectly returned that child block is not an orphan")
}
// Process the parent block. Note that this will attempt to unorphan the child block
isOrphan, delay, err = dag.ProcessBlock(parentBlock, BFNone)
if err != nil {
t.Fatalf("TestProcessOrphans: parent block unexpectedly returned an error: %s", err)
}
if delay != 0 {
t.Fatalf("TestProcessOrphans: parent block is too far in the future")
}
if isOrphan {
t.Fatalf("TestProcessOrphans: incorrectly returned that parent block is an orphan")
}
// Make sure that the child block had been rejected
node := dag.index.LookupNode(childBlock.Hash())
if node == nil {
t.Fatalf("TestProcessOrphans: child block missing from block index")
}
if !dag.index.NodeStatus(node).KnownInvalid() {
t.Fatalf("TestProcessOrphans: child block erroneously not marked as invalid")
}
}

View File

@@ -9,9 +9,9 @@ import (
"runtime"
"time"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
// txValidateItem holds a transaction along with which input to validate.

Some files were not shown because too many files have changed in this diff Show More