Compare commits

...

7 Commits

85 changed files with 4 additions and 6629 deletions

View File

@@ -1,45 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
// txgenClient wraps an RPC client and exposes a channel that delivers
// filtered block-added notifications to the transaction generator.
type txgenClient struct {
	*rpcclient.Client
	onBlockAdded chan *blockAddedMsg
}

// blockAddedMsg carries the payload of one OnFilteredBlockAdded
// notification: the chain height, the new block's header, and the
// filter-matching transactions it contains.
type blockAddedMsg struct {
	chainHeight uint64
	header      *wire.BlockHeader
	txs         []*util.Tx
}
// newTxgenClient connects to the JSON-RPC endpoint described by connCfg and
// subscribes to block notifications, forwarding each filtered block-added
// event on the returned client's onBlockAdded channel.
//
// Note: onBlockAdded is unbuffered, so the notification callback blocks
// until a consumer reads from the channel.
func newTxgenClient(connCfg *rpcclient.ConnConfig) (*txgenClient, error) {
	client := &txgenClient{
		onBlockAdded: make(chan *blockAddedMsg),
	}
	notificationHandlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
			txs []*util.Tx) {
			client.onBlockAdded <- &blockAddedMsg{
				chainHeight: height,
				header:      header,
				txs:         txs,
			}
		},
	}
	var err error
	client.Client, err = rpcclient.New(connCfg, notificationHandlers)
	if err != nil {
		return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
	}
	// Register for block-added notifications; without this the handler above
	// never fires.
	if err = client.NotifyBlocks(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
	}
	return client, nil
}

View File

@@ -1,79 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"path/filepath"
"github.com/jessevdk/go-flags"
)
const (
	// Log file names, created under defaultHomeDir.
	defaultLogFilename    = "txgen.log"
	defaultErrLogFilename = "txgen_err.log"
)

var (
	// Default configuration options
	defaultHomeDir    = util.AppDataDir("txgen", false)
	defaultLogFile    = filepath.Join(defaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
	// Fallbacks used when --num-outputs / --num-inputs are not specified.
	defaultTargetNumberOfOutputs uint64 = 1
	defaultTargetNumberOfInputs  uint64 = 1
)
// configFlags holds the command-line options of the transaction generator.
// Parsing and validation are performed by parseConfig.
type configFlags struct {
	Address               string  `long:"address" description:"An address to a JSON-RPC endpoints" required:"true"`
	PrivateKey            string  `long:"private-key" description:"Private key" required:"true"`
	SecondaryAddress      string  `long:"secondary-address" description:"An address that gets paid once per txgen run"`
	CertificatePath       string  `long:"cert" description:"Path to certificate accepted by JSON-RPC endpoint"`
	DisableTLS            bool    `long:"notls" description:"Disable TLS"`
	TxInterval            uint64  `long:"tx-interval" description:"Transaction emission interval (in milliseconds)"`
	TargetNumberOfOutputs uint64  `long:"num-outputs" description:"Target number of transaction outputs (with some randomization)"`
	TargetNumberOfInputs  uint64  `long:"num-inputs" description:"Target number of transaction inputs (with some randomization)"`
	AveragePayloadSize    uint64  `long:"payload-size" description:"Average size of transaction payload"`
	AverageGasFraction    float64 `long:"gas-fraction" description:"The average portion of gas from the gas limit"`
	AverageFeeRate        float64 `long:"fee-rate" description:"Average coins per gram fee rate"`
}
// parseConfig parses the command-line flags into a configFlags struct,
// validates flag combinations and value ranges, fills in defaults for
// --num-outputs/--num-inputs, and initializes logging.
func parseConfig() (*configFlags, error) {
	cfg := &configFlags{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return nil, err
	}
	// TLS requires a certificate, and a certificate is pointless without TLS.
	// (The previous message here — "--notls has to be disabled if --cert is
	// used" — described the opposite situation.)
	if cfg.CertificatePath == "" && !cfg.DisableTLS {
		return nil, errors.New("--cert is required when --notls is not specified")
	}
	if cfg.CertificatePath != "" && cfg.DisableTLS {
		return nil, errors.New("--cert should be omitted if --notls is used")
	}
	if cfg.AverageGasFraction >= 1 || cfg.AverageGasFraction < 0 {
		return nil, errors.New("--gas-fraction should be between 0 and 1")
	}
	// TargetNumberOfOutputs/TargetNumberOfInputs are uint64, so the previous
	// `< 0` validations could never trigger and were removed as dead code.
	// A zero value means "use the default".
	if cfg.TargetNumberOfOutputs == 0 {
		cfg.TargetNumberOfOutputs = defaultTargetNumberOfOutputs
	}
	if cfg.TargetNumberOfInputs == 0 {
		cfg.TargetNumberOfInputs = defaultTargetNumberOfInputs
	}
	initLog(defaultLogFile, defaultErrLogFile)
	return cfg, nil
}

View File

@@ -1,39 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
"io/ioutil"
)
// connectToServer establishes a websocket JSON-RPC connection to the node
// configured in cfg, loading the TLS certificate first when TLS is enabled,
// and returns a txgenClient bound to that connection.
func connectToServer(cfg *configFlags) (*txgenClient, error) {
	connCfg := &rpcclient.ConnConfig{
		Host:       cfg.Address,
		Endpoint:   "ws",
		User:       "user",
		Pass:       "pass",
		DisableTLS: cfg.DisableTLS,
	}
	if !cfg.DisableTLS {
		certificates, err := ioutil.ReadFile(cfg.CertificatePath)
		if err != nil {
			return nil, errors.Errorf("Error reading certificates file: %s", err)
		}
		connCfg.Certificates = certificates
	}
	client, err := newTxgenClient(connCfg)
	if err != nil {
		return nil, errors.Errorf("Error connecting to address %s: %s", cfg.Address, err)
	}
	log.Infof("Connected to server %s", cfg.Address)
	return client, nil
}

View File

@@ -1,28 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

# Work inside GOPATH so the kaspad import paths resolve.
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad

# curl/git are needed by `go mod download` for VCS-hosted dependencies.
RUN apk add --no-cache curl git

# Copy the module manifests first so the dependency download layer is cached
# independently of source changes.
COPY go.mod .
COPY go.sum .
RUN go mod download

COPY . .
# Build a statically linked linux binary of txgen (CGO disabled).
RUN cd cmd/txgen && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o txgen .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app
# tini acts as a minimal init so signals are forwarded to the txgen process.
RUN apk add --no-cache tini
COPY --from=build /go/src/github.com/kaspanet/kaspad/cmd/txgen/txgen /app/
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/txgen"]

View File

@@ -1,9 +0,0 @@
1. To build the docker image, invoke the following command from the kaspad root directory:
docker build -t txgen -f ./cmd/txgen/docker/Dockerfile .
2. To run:
a. create folder ~/.btcd/txgen with the following files:
rpc.cert - certificate file that all rpc nodes accept
addresses - list of node addresses in the format [hostname]:[port]. One node per line
b. run:
docker run -v ~/.btcd:/root/.btcd -t txgen

View File

@@ -1,27 +0,0 @@
package main
import (
"fmt"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/util/panics"
"os"
)
var (
	// backendLog is the shared logging backend for the txgen binary.
	backendLog = logs.NewBackend()
	// log is the txgen subsystem logger ("TXGN").
	log = backendLog.Logger("TXGN")
	// spawn starts goroutines whose panics are logged via log.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog attaches two rotating log files to the shared logging backend:
// logFile receives everything from trace level up, errLogFile receives
// warnings and above. Any failure is reported to stderr and terminates
// the process.
func initLog(logFile, errLogFile string) {
	if err := backendLog.AddLogFile(logFile, logs.LevelTrace); err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
		os.Exit(1)
	}
	if err := backendLog.AddLogFile(errLogFile, logs.LevelWarn); err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
		os.Exit(1)
	}
}

View File

@@ -1,70 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/base58"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
var (
	// activeNetParams selects the network the generator runs against.
	activeNetParams = &dagconfig.DevNetParams

	// p2pkhAddress is derived from privateKey in main and receives all
	// primary outputs.
	p2pkhAddress util.Address

	// secondaryAddress, when configured, gets paid once per txgen run.
	secondaryAddress util.Address

	// privateKey signs every generated transaction; set once in main.
	privateKey *ecc.PrivateKey
)
// privateKeyToP2pkhAddress generates p2pkh address from private key.
// The compressed-serialized public key is hashed into a standard
// pay-to-pubkey-hash address using the prefix of net.
func privateKeyToP2pkhAddress(key *ecc.PrivateKey, net *dagconfig.Params) (util.Address, error) {
	return util.NewAddressPubKeyHashFromPublicKey(key.PubKey().SerializeCompressed(), net.Prefix)
}
// main wires the generator together: parse flags, derive the wallet address
// from the configured private key, connect to the node, then run the
// transaction loop in a goroutine until an OS interrupt arrives.
func main() {
	defer panics.HandlePanic(log, nil, nil)
	cfg, err := parseConfig()
	if err != nil {
		panic(errors.Errorf("Error parsing command-line arguments: %s", err))
	}
	// Populate the package-level key/address globals used by the tx loop.
	privateKeyBytes := base58.Decode(cfg.PrivateKey)
	privateKey, _ = ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
	p2pkhAddress, err = privateKeyToP2pkhAddress(privateKey, activeNetParams)
	if err != nil {
		panic(errors.Errorf("Failed to get P2PKH address from private key: %s", err))
	}
	log.Infof("P2PKH address for private key: %s\n", p2pkhAddress)
	if cfg.SecondaryAddress != "" {
		secondaryAddress, err = util.DecodeAddress(cfg.SecondaryAddress, activeNetParams.Prefix)
		if err != nil {
			panic(errors.Errorf("Failed to decode secondary address %s: %s", cfg.SecondaryAddress, err))
		}
	}
	client, err := connectToServer(cfg)
	if err != nil {
		panic(errors.Errorf("Error connecting to servers: %s", err))
	}
	defer disconnect(client)
	// The transaction loop runs concurrently; main blocks on the interrupt
	// listener below.
	spawn(func() {
		err := txLoop(client, cfg)
		if err != nil {
			panic(err)
		}
	})
	interrupt := signal.InterruptListener()
	<-interrupt
}
// disconnect logs and closes the client's RPC connection.
func disconnect(client *txgenClient) {
	log.Infof("Disconnecting client")
	client.Disconnect()
}

View File

@@ -1,528 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"github.com/pkg/errors"
"math"
"math/rand"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
)
const (
	// These constants should be updated when the monetary policy changes.
	minSpendableAmount uint64 = 10000
	maxSpendableAmount uint64 = 5 * minSpendableAmount
	minTxFee           uint64 = 3000

	// spendSize is the largest number of bytes of a sigScript
	// which spends a p2pkh output: OP_DATA_73 <sig> OP_DATA_33 <pubkey>
	spendSize uint64 = 1 + 73 + 1 + 33

	// Value 8 bytes + serialized varint size for the length of ScriptPubKey +
	// ScriptPubKey bytes.
	outputSize uint64 = 8 + 1 + 25

	// txLifeSpan is how far the chain height must advance past a confirmed
	// transaction before it is pruned from memory.
	txLifeSpan            = 1000
	requiredConfirmations = 10

	// approximateConfirmationsForCoinbaseMaturity is the confirmation count
	// after which a coinbase transaction is treated as mature.
	approximateConfirmationsForCoinbaseMaturity = 150

	// Paging parameters for the initial searchRawTransactions history scan.
	searchRawTransactionResultCount = 1000
	searchRawTransactionMaxResults  = 5000

	// txMaxQueueLength bounds the channel of transactions waiting to be sent.
	txMaxQueueLength = 10000

	// maxResendDepth is how far the chain must advance past a transaction's
	// height before a non-accepted transaction is re-sent.
	maxResendDepth = 500

	// minSecondaryTxAmount is the amount of the one-off secondary payment.
	minSecondaryTxAmount = 100000000
)
// walletTransaction tracks a transaction relevant to the wallet address,
// along with the bookkeeping needed to decide when it is confirmed.
type walletTransaction struct {
	tx          *util.Tx
	chainHeight uint64
	// checkConfirmationCountdown counts down chain-height advances until the
	// transaction's confirmation status is queried again.
	checkConfirmationCountdown uint64
	confirmed                  bool
}

// utxoSet maps an outpoint to the unspent output it references.
type utxoSet map[wire.Outpoint]*wire.TxOut

// isDust reports whether value is too small to be worth spending (it cannot
// cover a minimal spendable amount plus the minimum fee).
func isDust(value uint64) bool {
	return value < minSpendableAmount+minTxFee
}

var (
	random = rand.New(rand.NewSource(time.Now().UnixNano()))
	// Scripts paying the primary and (optional) secondary addresses.
	primaryScriptPubKey   []byte
	secondaryScriptPubKey []byte
	// sentToSecondaryAddress records that the one-off secondary payment has
	// already been made this run.
	sentToSecondaryAddress bool
)
// txLoop performs main loop of transaction generation: it registers the
// wallet addresses with the node's transaction filter, reconstructs the
// wallet state from history, and then, for each added block, updates the
// known subnetworks and wallet transactions and enqueues newly generated
// transactions for sending.
func txLoop(client *txgenClient, cfg *configFlags) error {
	filterAddresses := []util.Address{p2pkhAddress}
	var err error
	primaryScriptPubKey, err = txscript.PayToAddrScript(p2pkhAddress)
	if err != nil {
		return errors.Errorf("failed to generate primaryScriptPubKey to address: %s", err)
	}
	if secondaryAddress != nil {
		secondaryScriptPubKey, err = txscript.PayToAddrScript(secondaryAddress)
		if err != nil {
			return errors.Errorf("failed to generate primaryScriptPubKey to address: %s", err)
		}
		filterAddresses = append(filterAddresses, secondaryAddress)
	}
	err = client.LoadTxFilter(true, filterAddresses, nil)
	if err != nil {
		return err
	}
	// gasLimitMap tracks the gas limit of every known subnetwork; the native
	// subnetwork has no gas.
	gasLimitMap := make(map[subnetworkid.SubnetworkID]uint64)
	gasLimitMap[*subnetworkid.SubnetworkIDNative] = 0
	walletUTXOSet, walletTxs, err := getInitialUTXOSetAndWalletTxs(client, gasLimitMap)
	if err != nil {
		return err
	}
	// Sending runs on its own goroutine so block processing is not blocked by
	// the (optionally rate-limited) send loop.
	txChan := make(chan *wire.MsgTx, txMaxQueueLength)
	spawn(func() {
		err := sendTransactionLoop(client, cfg.TxInterval, txChan)
		if err != nil {
			panic(err)
		}
	})
	for blockAdded := range client.onBlockAdded {
		log.Infof("Block %s Added with %d relevant transactions", blockAdded.header.BlockHash(), len(blockAdded.txs))
		err := updateSubnetworks(blockAdded.txs, gasLimitMap)
		if err != nil {
			return err
		}
		updateWalletTxs(blockAdded, walletTxs)
		err = enqueueTransactions(client, blockAdded, walletUTXOSet, walletTxs, txChan, cfg, gasLimitMap)
		if err != nil {
			return err
		}
	}
	return nil
}
// updateSubnetworks scans txs for subnetwork-registry transactions and
// records each registered subnetwork's gas limit in gasLimitMap.
func updateSubnetworks(txs []*util.Tx, gasLimitMap map[subnetworkid.SubnetworkID]uint64) error {
	for _, utilTx := range txs {
		registryTx := utilTx.MsgTx()
		if !registryTx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) {
			continue
		}
		subnetworkID, err := blockdag.TxToSubnetworkID(registryTx)
		if err != nil {
			return errors.Errorf("could not build subnetwork ID: %s", err)
		}
		gasLimit := blockdag.ExtractGasLimit(registryTx)
		log.Infof("Found subnetwork %s with gas limit %d", subnetworkID, gasLimit)
		gasLimitMap[*subnetworkID] = gasLimit
	}
	return nil
}
// sendTransactionLoop sends every transaction received on txChan to the node.
// When interval is non-zero, sends are spaced at least interval milliseconds
// apart; zero means "send as fast as transactions arrive". The loop exits
// when txChan is closed or a send fails.
func sendTransactionLoop(client *txgenClient, interval uint64, txChan chan *wire.MsgTx) error {
	var ticker *time.Ticker
	if interval != 0 {
		ticker = time.NewTicker(time.Duration(interval) * time.Millisecond)
		// The original code never stopped the ticker, leaking it for the
		// lifetime of the process once the loop returned.
		defer ticker.Stop()
	}
	for tx := range txChan {
		// Log before attempting the send so the message cannot claim success
		// for a transaction whose send failed.
		log.Infof("Sending tx %s to subnetwork %s with %d inputs, %d outputs, %d payload size and %d gas", tx.TxID(), tx.SubnetworkID, len(tx.TxIn), len(tx.TxOut), len(tx.Payload), tx.Gas)
		if _, err := client.SendRawTransaction(tx, true); err != nil {
			return err
		}
		if ticker != nil {
			<-ticker.C
		}
	}
	return nil
}
// getInitialUTXOSetAndWalletTxs rebuilds the wallet's state from the node's
// transaction history: it returns the UTXO set of confirmed, unspent outputs
// and the map of still-unconfirmed wallet transactions to keep tracking.
func getInitialUTXOSetAndWalletTxs(client *txgenClient, gasLimitMap map[subnetworkid.SubnetworkID]uint64) (utxoSet, map[daghash.TxID]*walletTransaction, error) {
	walletUTXOSet := make(utxoSet)
	walletTxs := make(map[daghash.TxID]*walletTransaction)
	initialTxs, err := collectTransactions(client, gasLimitMap)
	if err != nil {
		return nil, nil, err
	}
	// Add all of the confirmed transaction outputs to the UTXO.
	for _, initialTx := range initialTxs {
		if initialTx.confirmed {
			addTxOutsToUTXOSet(walletUTXOSet, initialTx.tx.MsgTx())
		}
	}
	for _, initialTx := range initialTxs {
		// Remove all of the previous outpoints from the UTXO.
		// The previous outpoints are removed for unconfirmed
		// transactions as well, to avoid potential
		// double spends.
		removeTxInsFromUTXOSet(walletUTXOSet, initialTx.tx.MsgTx())
		// Add unconfirmed transactions to walletTxs, so we can
		// add their outputs to the UTXO when they are confirmed.
		if !initialTx.confirmed {
			walletTxs[*initialTx.tx.ID()] = initialTx
		}
	}
	return walletUTXOSet, walletTxs, nil
}
// updateWalletTxs advances the confirmation countdown of tracked
// transactions, prunes confirmed transactions that are old enough, and
// starts tracking the wallet-relevant transactions of the new block.
func updateWalletTxs(blockAdded *blockAddedMsg, walletTxs map[daghash.TxID]*walletTransaction) {
	for txID, walletTx := range walletTxs {
		// Only count down once the chain has advanced past the height at
		// which the transaction was first seen.
		if walletTx.checkConfirmationCountdown > 0 && walletTx.chainHeight < blockAdded.chainHeight {
			walletTx.checkConfirmationCountdown--
		}
		// Delete old confirmed transactions to save memory
		// (deleting entries during a range over a map is safe in Go).
		if walletTx.confirmed && walletTx.chainHeight+txLifeSpan < blockAdded.chainHeight {
			delete(walletTxs, txID)
		}
	}
	for _, tx := range blockAdded.txs {
		if _, ok := walletTxs[*tx.ID()]; !ok {
			walletTxs[*tx.ID()] = &walletTransaction{
				tx:                         tx,
				chainHeight:                blockAdded.chainHeight,
				checkConfirmationCountdown: requiredConfirmations,
			}
		}
	}
}
// randomWithAverageTarget draws a uniform random value from [0, 2*target),
// whose expected value is target.
func randomWithAverageTarget(target float64) float64 {
	return 2 * target * random.Float64()
}
// randomIntegerWithAverageTarget draws a random integer averaging target.
// When allowZero is false, draws below 1 are clamped up so the result is
// always at least 1.
func randomIntegerWithAverageTarget(target uint64, allowZero bool) uint64 {
	drawn := randomWithAverageTarget(float64(target))
	if drawn < 1 && !allowZero {
		drawn = 1
	}
	return uint64(math.Round(drawn))
}
// createRandomTxFromFunds creates a transaction spending the wallet's funds.
//
// If a secondary address is configured, has not been paid yet this run, and
// there are enough funds, it builds the one-off payment to the secondary
// address and reports isSecondaryAddress = true. Otherwise it builds a
// randomized transaction to the primary address — random subnetwork, payload
// size, gas, fee rate and input/output counts around the configured targets —
// and reports isSecondaryAddress = false.
func createRandomTxFromFunds(walletUTXOSet utxoSet, cfg *configFlags, gasLimitMap map[subnetworkid.SubnetworkID]uint64, funds uint64) (tx *wire.MsgTx, isSecondaryAddress bool, err error) {
	if secondaryScriptPubKey != nil && !sentToSecondaryAddress && funds > minSecondaryTxAmount {
		tx, err = createTx(walletUTXOSet, minSecondaryTxAmount, cfg.AverageFeeRate, 1, 1, subnetworkid.SubnetworkIDNative, 0, 0, secondaryScriptPubKey)
		if err != nil {
			return nil, false, err
		}
		return tx, true, nil
	}
	payloadSize := uint64(0)
	gas := uint64(0)
	// In Go map iteration is randomized, so if we want
	// to choose a random element from a map we can
	// just take the first iterated element.
	chosenSubnetwork := subnetworkid.SubnetworkIDNative
	chosenGasLimit := uint64(0)
	for subnetworkID, gasLimit := range gasLimitMap {
		// Copy before taking the address: &subnetworkID would otherwise alias
		// the (reused) loop variable.
		subnetworkID := subnetworkID
		chosenSubnetwork = &subnetworkID
		chosenGasLimit = gasLimit
		break
	}
	if !chosenSubnetwork.IsEqual(subnetworkid.SubnetworkIDNative) {
		payloadSize = randomIntegerWithAverageTarget(cfg.AveragePayloadSize, true)
		gas = randomIntegerWithAverageTarget(uint64(float64(chosenGasLimit)*cfg.AverageGasFraction), true)
		if gas > chosenGasLimit {
			gas = chosenGasLimit
		}
	}
	targetNumberOfOutputs := randomIntegerWithAverageTarget(cfg.TargetNumberOfOutputs, false)
	targetNumberOfInputs := randomIntegerWithAverageTarget(cfg.TargetNumberOfInputs, false)
	feeRate := randomWithAverageTarget(cfg.AverageFeeRate)
	amount := minSpendableAmount + uint64(random.Int63n(int64(maxSpendableAmount-minSpendableAmount)))
	amount *= targetNumberOfOutputs
	if amount > funds-minTxFee {
		amount = funds - minTxFee
	}
	tx, err = createTx(walletUTXOSet, amount, feeRate, targetNumberOfOutputs, targetNumberOfInputs, chosenSubnetwork, payloadSize, gas, primaryScriptPubKey)
	if err != nil {
		return nil, false, err
	}
	// This transaction pays the primary address, so it must NOT be reported
	// as a secondary-address payment. The previous code returned true here,
	// which set sentToSecondaryAddress after the first primary transaction
	// and could suppress the one-off secondary payment entirely.
	return tx, false, nil
}
// enqueueTransactions applies confirmation updates for the newly added block
// and then keeps creating random transactions, queueing each on txChan,
// until the wallet's remaining funds are dust.
func enqueueTransactions(client *txgenClient, blockAdded *blockAddedMsg, walletUTXOSet utxoSet, walletTxs map[daghash.TxID]*walletTransaction,
	txChan chan *wire.MsgTx, cfg *configFlags, gasLimitMap map[subnetworkid.SubnetworkID]uint64) error {
	if err := applyConfirmedTransactionsAndResendNonAccepted(client, walletTxs, walletUTXOSet, blockAdded.chainHeight, txChan); err != nil {
		return err
	}
	// Each created transaction removes its inputs from walletUTXOSet, so the
	// available funds shrink every iteration and the loop terminates.
	for funds := calcUTXOSetFunds(walletUTXOSet); !isDust(funds); funds = calcUTXOSetFunds(walletUTXOSet) {
		tx, isSecondaryAddress, err := createRandomTxFromFunds(walletUTXOSet, cfg, gasLimitMap, funds)
		if err != nil {
			return err
		}
		txChan <- tx
		if isSecondaryAddress {
			sentToSecondaryAddress = true
		}
	}
	return nil
}
// createTx builds a transaction paying at least minAmount to scriptPubKey,
// funded from walletUTXOSet. For non-native subnetworks a zero-filled payload
// of payloadSize bytes and the given gas are attached. The selected inputs
// are signed and removed from the UTXO set before returning.
func createTx(walletUTXOSet utxoSet, minAmount uint64, feeRate float64, targetNumberOfOutputs uint64, targetNumberOfInputs uint64,
	subnetworkdID *subnetworkid.SubnetworkID, payloadSize uint64, gas uint64, scriptPubKey []byte) (*wire.MsgTx, error) {
	var tx *wire.MsgTx
	if subnetworkdID.IsEqual(subnetworkid.SubnetworkIDNative) {
		tx = wire.NewNativeMsgTx(wire.TxVersion, nil, nil)
	} else {
		payload := make([]byte, payloadSize)
		tx = wire.NewSubnetworkMsgTx(wire.TxVersion, nil, nil, subnetworkdID, gas, payload)
	}
	// Attempt to fund the transaction with spendable utxos.
	funds, err := fundTx(walletUTXOSet, tx, minAmount, feeRate, targetNumberOfOutputs, targetNumberOfInputs)
	if err != nil {
		return nil, err
	}
	// Cap the output count so each output stays at least minSpendableAmount.
	// NOTE(review): if funds < minSpendableAmount, numOuts becomes 0 and
	// funds/numOuts below divides by zero — presumably fundTx's minAmount
	// guarantee makes that unreachable; confirm.
	maxNumOuts := funds / minSpendableAmount
	numOuts := targetNumberOfOutputs
	if numOuts > maxNumOuts {
		numOuts = maxNumOuts
	}
	// Split the remaining funds (after fee) evenly across the outputs.
	fee := calcFee(tx, feeRate, numOuts, walletUTXOSet)
	funds -= fee
	for i := uint64(0); i < numOuts; i++ {
		tx.AddTxOut(&wire.TxOut{
			Value:        funds / numOuts,
			ScriptPubKey: scriptPubKey,
		})
	}
	err = signTx(walletUTXOSet, tx)
	if err != nil {
		return nil, err
	}
	removeTxInsFromUTXOSet(walletUTXOSet, tx)
	return tx, nil
}
// signTx signs a transaction, producing a P2PKH signature script for each
// input using the wallet's private key and the script of the spent output.
func signTx(walletUTXOSet utxoSet, tx *wire.MsgTx) error {
	for idx, input := range tx.TxIn {
		prevOut := walletUTXOSet[input.PreviousOutpoint]
		sigScript, err := txscript.SignatureScript(tx, idx, prevOut.ScriptPubKey,
			txscript.SigHashAll, privateKey, true)
		if err != nil {
			return errors.Errorf("Failed to sign transaction: %s", err)
		}
		input.SignatureScript = sigScript
	}
	return nil
}
// fundTx adds inputs from walletUTXOSet to tx until the selected value can
// cover amount plus the estimated fee and at least targetNumberOfInputs
// inputs have been gathered (or the UTXO set is exhausted). It returns the
// total value of the selected inputs.
func fundTx(walletUTXOSet utxoSet, tx *wire.MsgTx, amount uint64, feeRate float64, targetNumberOfOutputs uint64, targetNumberOfInputs uint64) (uint64, error) {
	amountSelected := uint64(0)
	isTxFunded := false
	// Map iteration order is random, which doubles as coin selection here.
	for outpoint, output := range walletUTXOSet {
		amountSelected += output.Value
		// Add the selected output to the transaction.
		// NOTE(review): &outpoint is the address of the (reused) loop
		// variable — assumed safe because wire.NewTxIn appears to copy the
		// outpoint value; confirm against the wire package.
		tx.AddTxIn(wire.NewTxIn(&outpoint, nil))
		// Check if transaction has enough funds. If we don't have enough
		// coins from the current amount selected to pay the fee, or we have
		// fewer inputs than the targeted amount, continue to grab more coins.
		isTxFunded = isFunded(tx, feeRate, targetNumberOfOutputs, amountSelected, amount, walletUTXOSet)
		if uint64(len(tx.TxIn)) >= targetNumberOfInputs && isTxFunded {
			break
		}
	}
	if !isTxFunded {
		return 0, errors.Errorf("not enough funds for coin selection")
	}
	return amountSelected, nil
}
// isFunded checks if the transaction has enough funds to cover the fee
// required for the txn, i.e. whether the selected amount exceeds the
// required fee by at least targetAmount.
func isFunded(tx *wire.MsgTx, feeRate float64, targetNumberOfOutputs uint64, amountSelected uint64, targetAmount uint64, walletUTXOSet utxoSet) bool {
	requiredFee := calcFee(tx, feeRate, targetNumberOfOutputs, walletUTXOSet)
	if amountSelected <= requiredFee {
		return false
	}
	return amountSelected-requiredFee >= targetAmount
}
// calcFee estimates the fee for msgTx at feeRate coins-per-gram, accounting
// for numberOfOutputs outputs that will be added later. The result is never
// below minTxFee.
func calcFee(msgTx *wire.MsgTx, feeRate float64, numberOfOutputs uint64, walletUTXOSet utxoSet) uint64 {
	massWithOutputs := calcTxMass(msgTx, walletUTXOSet) + outputsTotalSize(numberOfOutputs)
	fee := uint64(float64(massWithOutputs) * feeRate)
	if fee >= minTxFee {
		return fee
	}
	return minTxFee
}
// outputsTotalSize returns the serialized size contributed by numberOfOutputs
// outputs: the outputs themselves plus the varint encoding their count.
func outputsTotalSize(numberOfOutputs uint64) uint64 {
	return numberOfOutputs*outputSize + uint64(wire.VarIntSerializeSize(numberOfOutputs))
}
// calcTxMass computes the mass of msgTx, looking up the script of each spent
// output in the wallet's UTXO set.
func calcTxMass(msgTx *wire.MsgTx, walletUTXOSet utxoSet) uint64 {
	previousScriptPubKeys := getPreviousScriptPubKeys(msgTx, walletUTXOSet)
	return blockdag.CalcTxMass(util.NewTx(msgTx), previousScriptPubKeys)
}
// getPreviousScriptPubKeys returns, in input order, the scriptPubKey of the
// output spent by each of msgTx's inputs, looked up in the wallet UTXO set.
func getPreviousScriptPubKeys(msgTx *wire.MsgTx, walletUTXOSet utxoSet) [][]byte {
	scriptPubKeys := make([][]byte, 0, len(msgTx.TxIn))
	for _, txIn := range msgTx.TxIn {
		prevOut := walletUTXOSet[txIn.PreviousOutpoint]
		scriptPubKeys = append(scriptPubKeys, prevOut.ScriptPubKey)
	}
	return scriptPubKeys
}
// applyConfirmedTransactionsAndResendNonAccepted polls the node for each
// tracked transaction whose confirmation countdown has elapsed. Matured
// transactions are marked confirmed and their outputs added to the UTXO set;
// transactions that were dropped (zero confirmations, not in the mempool)
// and are older than maxResendDepth are re-queued for sending.
func applyConfirmedTransactionsAndResendNonAccepted(client *txgenClient, walletTxs map[daghash.TxID]*walletTransaction, walletUTXOSet utxoSet,
	blockChainHeight uint64, txChan chan *wire.MsgTx) error {
	for txID, walletTx := range walletTxs {
		if !walletTx.confirmed && walletTx.checkConfirmationCountdown == 0 {
			txResult, err := client.GetRawTransactionVerbose(&txID)
			if err != nil {
				return err
			}
			msgTx := walletTx.tx.MsgTx()
			if isTxMatured(msgTx, *txResult.Confirmations) {
				walletTx.confirmed = true
				addTxOutsToUTXOSet(walletUTXOSet, msgTx)
			} else if !msgTx.IsCoinBase() && *txResult.Confirmations == 0 && !txResult.IsInMempool && blockChainHeight > walletTx.chainHeight+maxResendDepth {
				log.Infof("Transaction %s was not accepted in the DAG. Resending", txID)
				txChan <- msgTx
			}
		}
	}
	return nil
}
// removeTxInsFromUTXOSet deletes every outpoint spent by tx from the
// wallet's UTXO set.
func removeTxInsFromUTXOSet(walletUTXOSet utxoSet, tx *wire.MsgTx) {
	for _, input := range tx.TxIn {
		delete(walletUTXOSet, input.PreviousOutpoint)
	}
}
// addTxOutsToUTXOSet inserts every output of tx that pays the primary
// address into the wallet's UTXO set, keyed by its outpoint.
func addTxOutsToUTXOSet(walletUTXOSet utxoSet, tx *wire.MsgTx) {
	for index, output := range tx.TxOut {
		if !bytes.Equal(output.ScriptPubKey, primaryScriptPubKey) {
			continue
		}
		outpoint := wire.Outpoint{TxID: *tx.TxID(), Index: uint32(index)}
		walletUTXOSet[outpoint] = output
	}
}
// isTxMatured reports whether tx has enough confirmations to be considered
// settled: coinbase transactions need the (approximate) coinbase maturity
// count, all other transactions need requiredConfirmations.
func isTxMatured(tx *wire.MsgTx, confirmations uint64) bool {
	if tx.IsCoinBase() {
		return confirmations >= approximateConfirmationsForCoinbaseMaturity
	}
	return confirmations >= requiredConfirmations
}
// calcUTXOSetFunds sums the value of every output in the wallet's UTXO set.
func calcUTXOSetFunds(walletUTXOSet utxoSet) uint64 {
	total := uint64(0)
	for _, txOut := range walletUTXOSet {
		total += txOut.Value
	}
	return total
}
// collectTransactions pages through the node's transaction history for the
// wallet address (up to searchRawTransactionMaxResults entries), building a
// map of wallet transactions and feeding every subnetwork-registry
// transaction it finds into gasLimitMap.
func collectTransactions(client *txgenClient, gasLimitMap map[subnetworkid.SubnetworkID]uint64) (map[daghash.TxID]*walletTransaction, error) {
	registryTxs := make([]*util.Tx, 0)
	walletTxs := make(map[daghash.TxID]*walletTransaction)
	skip := 0
	for skip < searchRawTransactionMaxResults {
		results, err := client.SearchRawTransactionsVerbose(p2pkhAddress, skip, searchRawTransactionResultCount, true, true, nil)
		if err != nil {
			// Break when there are no further txs
			if rpcError, ok := err.(*rpcmodel.RPCError); ok && rpcError.Code == rpcmodel.ErrRPCNoTxInfo {
				break
			}
			return nil, err
		}
		for _, result := range results {
			// Mempool transactions and red block transactions bring about unnecessary complexity, so
			// simply don't bother processing them
			if result.IsInMempool || *result.Confirmations == 0 {
				continue
			}
			tx, err := parseRawTransactionResult(result)
			if err != nil {
				return nil, errors.Errorf("failed to process SearchRawTransactionResult: %s", err)
			}
			if tx == nil {
				continue
			}
			txID := tx.TxID()
			utilTx := util.NewTx(tx)
			// Overwrite an existing entry only if it is not yet confirmed,
			// so a confirmed status is never downgraded.
			if existingTx, ok := walletTxs[*txID]; !ok || !existingTx.confirmed {
				walletTxs[*txID] = &walletTransaction{
					tx:                         utilTx,
					checkConfirmationCountdown: requiredConfirmations,
					confirmed:                  isTxMatured(tx, *result.Confirmations),
				}
			}
			if tx.SubnetworkID.IsEqual(subnetworkid.SubnetworkIDRegistry) {
				registryTxs = append(registryTxs, utilTx)
			}
		}
		skip += searchRawTransactionResultCount
	}
	err := updateSubnetworks(registryTxs, gasLimitMap)
	if err != nil {
		return nil, err
	}
	return walletTxs, nil
}
// parseRawTransactionResult decodes the hex-encoded transaction carried in a
// SearchRawTransactionsResult into a wire.MsgTx.
func parseRawTransactionResult(result *rpcmodel.SearchRawTransactionsResult) (*wire.MsgTx, error) {
	serialized, err := hex.DecodeString(result.Hex)
	if err != nil {
		return nil, errors.Errorf("failed to decode transaction bytes: %s", err)
	}
	var msgTx wire.MsgTx
	if err := msgTx.Deserialize(bytes.NewReader(serialized)); err != nil {
		return nil, errors.Errorf("failed to deserialize transaction: %s", err)
	}
	return &msgTx, nil
}

View File

@@ -1,15 +0,0 @@
ISC License
Copyright (c) 2018 The Decred developers
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@@ -1,58 +0,0 @@
dnsseeder
=========
## Requirements
Latest version of [Go](http://golang.org) (currently 1.13)
## Getting Started
- Install Go according to the installation instructions here:
http://golang.org/doc/install
- Ensure Go was installed properly and is a supported version:
- Launch a kaspad node for the dnsseeder to connect to
```bash
$ go version
$ go env GOROOT GOPATH
```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as
`~/dev/go` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain dnsseeder, all dependencies, and install it:
```bash
$ git clone https://github.com/kaspanet/dnsseeder $GOPATH/src/github.com/kaspanet/dnsseeder
$ cd $GOPATH/src/github.com/kaspanet/dnsseeder
$ go install .
```
- dnsseeder will now be installed in either ```$GOROOT/bin``` or
```$GOPATH/bin``` depending on your configuration. If you did not already
add the bin directory to your system path during Go installation, we
recommend you do so now.
To start dnsseeder listening on udp 127.0.0.1:5354 with an initial connection to working testnet node running on 127.0.0.1:
```
$ ./dnsseeder -n nameserver.example.com -H network-seed.example.com -s 127.0.0.1 --testnet
```
You will then need to redirect DNS traffic on your public IP port 53 to 127.0.0.1:5354
Note: to listen directly on port 53 on most Unix systems, one has to run dnsseeder as root, which is discouraged
## Setting up DNS Records
To create a working set-up where dnsseeder can provide IPs to kaspad instances, set the following DNS records:
```
NAME TYPE VALUE
---- ---- -----
[your.domain.name] A [your ip address]
[ns-your.domain.name] NS [your.domain.name]
```

View File

@@ -1,147 +0,0 @@
// Copyright (c) 2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/kaspanet/kaspad/config"
"github.com/pkg/errors"
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/util"
)
const (
	defaultConfigFilename = "dnsseeder.conf"
	defaultLogFilename    = "dnsseeder.log"
	defaultErrLogFilename = "dnsseeder_err.log"
	// defaultListenPort is deliberately unprivileged; see the README about
	// redirecting traffic from port 53.
	defaultListenPort = "5354"
)

var (
	// Default configuration options
	defaultHomeDir    = util.AppDataDir("dnsseeder", false)
	defaultConfigFile = filepath.Join(defaultHomeDir, defaultConfigFilename)
	defaultLogFile    = filepath.Join(defaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
)

// activeConfig is the process-wide configuration, populated by loadConfig.
var activeConfig *ConfigFlags
// ActiveConfig returns the active configuration struct.
// It is nil until loadConfig has completed successfully.
func ActiveConfig() *ConfigFlags {
	return activeConfig
}
// ConfigFlags holds the configurations set by the command line argument
// and the optional dnsseeder.conf file.
type ConfigFlags struct {
	Host       string `short:"H" long:"host" description:"Seed DNS address"`
	Listen     string `long:"listen" short:"l" description:"Listen on address:port"`
	Nameserver string `short:"n" long:"nameserver" description:"hostname of nameserver"`
	Seeder     string `short:"s" long:"default seeder" description:"IP address of a working node"`
	// NetworkFlags contributes the network-selection flags (e.g. --testnet).
	config.NetworkFlags
}
// loadConfig initializes and parses the seeder configuration. Precedence is
// defaults < config file < command line. It also creates the home directory,
// validates required flags, resolves the selected network and initializes
// logging.
func loadConfig() (*ConfigFlags, error) {
	err := os.MkdirAll(defaultHomeDir, 0700)
	if err != nil {
		// Show a nicer error message if it's because a symlink is
		// linked to a directory that does not exist (probably because
		// it's not mounted).
		if e, ok := err.(*os.PathError); ok && os.IsExist(err) {
			if link, lerr := os.Readlink(e.Path); lerr == nil {
				str := "is symlink %s -> %s mounted?"
				err = errors.Errorf(str, e.Path, link)
			}
		}
		str := "failed to create home directory: %v"
		err := errors.Errorf(str, err)
		fmt.Fprintln(os.Stderr, err)
		return nil, err
	}
	// Default config.
	activeConfig = &ConfigFlags{
		Listen: normalizeAddress("localhost", defaultListenPort),
	}
	// Pre-parse the command line so --help can exit before the config file
	// is consulted.
	preCfg := activeConfig
	preParser := flags.NewParser(preCfg, flags.Default)
	_, err = preParser.Parse()
	if err != nil {
		e, ok := err.(*flags.Error)
		if ok && e.Type == flags.ErrHelp {
			os.Exit(0)
		}
		preParser.WriteHelp(os.Stderr)
		return nil, err
	}
	appName := filepath.Base(os.Args[0])
	appName = strings.TrimSuffix(appName, filepath.Ext(appName))
	usageMessage := fmt.Sprintf("Use %s -h to show usage", appName)
	// Load additional config from file.
	parser := flags.NewParser(activeConfig, flags.Default)
	err = flags.NewIniParser(parser).ParseFile(defaultConfigFile)
	if err != nil {
		// A missing config file is fine; any other parse error is fatal.
		if _, ok := err.(*os.PathError); !ok {
			fmt.Fprintf(os.Stderr, "Error parsing ConfigFlags "+
				"file: %v\n", err)
			fmt.Fprintln(os.Stderr, usageMessage)
			return nil, err
		}
	}
	// Parse command line options again to ensure they take precedence.
	_, err = parser.Parse()
	if err != nil {
		if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
			parser.WriteHelp(os.Stderr)
		}
		return nil, err
	}
	if len(activeConfig.Host) == 0 {
		// errors.New instead of errors.Errorf(str): the message is not a
		// format string, and vet's printf check flags non-constant formats.
		err := errors.New("Please specify a hostname")
		fmt.Fprintln(os.Stderr, err)
		return nil, err
	}
	if len(activeConfig.Nameserver) == 0 {
		err := errors.New("Please specify a nameserver")
		fmt.Fprintln(os.Stderr, err)
		return nil, err
	}
	activeConfig.Listen = normalizeAddress(activeConfig.Listen, defaultListenPort)
	err = activeConfig.ResolveNetwork(parser)
	if err != nil {
		return nil, err
	}
	initLog(defaultLogFile, defaultErrLogFile)
	return activeConfig, nil
}
// normalizeAddress returns addr with the passed default port appended if
// there is not already a port specified.
func normalizeAddress(addr, defaultPort string) string {
	if _, _, err := net.SplitHostPort(addr); err == nil {
		// addr already carries a port; leave it untouched.
		return addr
	}
	return net.JoinHostPort(addr, defaultPort)
}

View File

@@ -1,238 +0,0 @@
// Copyright (c) 2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"github.com/pkg/errors"
"net"
"os"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/connmgr"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
"github.com/miekg/dns"
)
// DNSServer struct holds the seeder's serving configuration: the hostname it
// is authoritative for, the address:port to listen on, and the name of its
// nameserver (both names normalized to fully-qualified form by NewDNSServer).
type DNSServer struct {
	hostname   string
	listen     string
	nameserver string
}
// Start - starts server.
// It publishes an NS authority record for the configured hostname and then
// answers DNS queries read from a UDP socket, spawning one goroutine per
// request, until the systemShutdown flag is raised.
func (d *DNSServer) Start() {
	defer wg.Done()
	rr := fmt.Sprintf("%s 86400 IN NS %s", d.hostname, d.nameserver)
	authority, err := dns.NewRR(rr)
	if err != nil {
		log.Infof("NewRR: %v", err)
		return
	}
	udpAddr, err := net.ResolveUDPAddr("udp4", d.listen)
	if err != nil {
		log.Infof("ResolveUDPAddr: %v", err)
		return
	}
	udpListen, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		log.Infof("ListenUDP: %v", err)
		return
	}
	defer udpListen.Close()
	for {
		// 512 bytes is the classic maximum for an unextended UDP DNS message;
		// each request handler gets its own buffer.
		b := make([]byte, 512)
	mainLoop:
		// The one-second read deadline lets the loop periodically observe the
		// shutdown flag instead of blocking forever in ReadFromUDP.
		err := udpListen.SetReadDeadline(time.Now().Add(time.Second))
		if err != nil {
			log.Infof("SetReadDeadline: %v", err)
			os.Exit(1)
		}
		_, addr, err := udpListen.ReadFromUDP(b)
		if err != nil {
			if err, ok := err.(net.Error); ok && err.Timeout() {
				if atomic.LoadInt32(&systemShutdown) == 0 {
					// use goto in order to do not re-allocate 'b' buffer
					goto mainLoop
				}
				log.Infof("DNS server shutdown")
				return
			}
			log.Infof("Read: %T", err.(*net.OpError).Err)
			continue
		}
		wg.Add(1)
		spawn(func() { d.handleDNSRequest(addr, authority, udpListen, b) })
	}
}
// NewDNSServer creates a DNS server that is authoritative for hostname,
// names itself nameserver in NS records, and listens on listen. Both
// hostname and nameserver are normalized to fully-qualified form (a
// trailing dot is appended when missing).
func NewDNSServer(hostname, nameserver, listen string) *DNSServer {
	// strings.HasSuffix also handles empty inputs safely; the previous
	// hostname[len(hostname)-1] indexing panicked on an empty string.
	if !strings.HasSuffix(hostname, ".") {
		hostname = hostname + "."
	}
	if !strings.HasSuffix(nameserver, ".") {
		nameserver = nameserver + "."
	}
	return &DNSServer{
		hostname:   hostname,
		listen:     listen,
		nameserver: nameserver,
	}
}
// extractServicesSubnetworkID parses the queried domain name and extracts
// the requested service flags and subnetwork ID. The domain name may be in
// the following format:
//	[n[subnetwork].][xservice.]hostname
// where connmgr.SubnetworkIDPrefixChar and connmgr.ServiceFlagPrefixChar are
// the label prefixes. It returns the wanted service flags (defaulting to
// wire.SFNodeNetwork), the parsed subnetwork ID (nil when none was given),
// and whether nodes of all subnetworks should be included.
func (d *DNSServer) extractServicesSubnetworkID(addr *net.UDPAddr, domainName string) (wire.ServiceFlag, *subnetworkid.SubnetworkID, bool, error) {
	wantedSF := wire.SFNodeNetwork
	var subnetworkID *subnetworkid.SubnetworkID
	includeAllSubnetworks := true
	if d.hostname != domainName {
		idx := 0
		labels := dns.SplitDomainName(domainName)
		if labels[0][0] == connmgr.SubnetworkIDPrefixChar {
			includeAllSubnetworks = false
			if len(labels[0]) > 1 {
				idx = 1
				// Assign to the outer subnetworkID. The previous code used
				// `:=` here, which declared a new shadowing variable, so the
				// parsed subnetwork ID was silently dropped and the function
				// always returned nil for it.
				var err error
				subnetworkID, err = subnetworkid.NewFromStr(labels[0][1:])
				if err != nil {
					log.Infof("%s: subnetworkid.NewFromStr: %v", addr, err)
					return wantedSF, subnetworkID, includeAllSubnetworks, err
				}
			}
		}
		if labels[idx][0] == connmgr.ServiceFlagPrefixChar && len(labels[idx]) > 1 {
			wantedSFStr := labels[idx][1:]
			u, err := strconv.ParseUint(wantedSFStr, 10, 64)
			if err != nil {
				log.Infof("%s: ParseUint: %v", addr, err)
				return wantedSF, subnetworkID, includeAllSubnetworks, err
			}
			wantedSF = wire.ServiceFlag(u)
		}
	}
	return wantedSF, subnetworkID, includeAllSubnetworks, nil
}
// validateDNSRequest unpacks a raw UDP payload into a DNS message and
// performs basic sanity checks: exactly one question, the queried name must
// contain this server's hostname, and the question type must be one this
// server can answer. It returns the parsed message, the lowercased queried
// domain name, and the textual record type ("A", "AAAA" or "NS").
func (d *DNSServer) validateDNSRequest(addr *net.UDPAddr, b []byte) (dnsMsg *dns.Msg, domainName string, atype string, err error) {
	dnsMsg = new(dns.Msg)
	if err = dnsMsg.Unpack(b[:]); err != nil {
		log.Infof("%s: invalid dns message: %v", addr, err)
		return nil, "", "", err
	}
	if numQuestions := len(dnsMsg.Question); numQuestions != 1 {
		str := fmt.Sprintf("%s sent more than 1 question: %d", addr, numQuestions)
		log.Infof("%s", str)
		return nil, "", "", errors.Errorf("%s", str)
	}
	domainName = strings.ToLower(dnsMsg.Question[0].Name)
	if strings.LastIndex(domainName, d.hostname) < 0 {
		str := fmt.Sprintf("invalid name: %s", dnsMsg.Question[0].Name)
		log.Infof("%s", str)
		return nil, "", "", errors.Errorf("%s", str)
	}
	atype, err = translateDNSQuestion(addr, dnsMsg)
	return dnsMsg, domainName, atype, err
}
// translateDNSQuestion maps the single question's qtype to its textual
// record type. Only A, AAAA and NS are supported; anything else is logged
// and rejected with an error.
func translateDNSQuestion(addr *net.UDPAddr, dnsMsg *dns.Msg) (string, error) {
	switch qtype := dnsMsg.Question[0].Qtype; qtype {
	case dns.TypeA:
		return "A", nil
	case dns.TypeAAAA:
		return "AAAA", nil
	case dns.TypeNS:
		return "NS", nil
	default:
		str := fmt.Sprintf("%s: invalid qtype: %d", addr, qtype)
		log.Infof("%s", str)
		return "", errors.Errorf("%s", str)
	}
}
// buildDNSResponse assembles and packs the wire-format answer for a single
// validated query. For A/AAAA queries it attaches the authority NS record
// and one answer per good address from the address manager; for NS queries
// it answers with this server's own nameserver record. Returns the packed
// response bytes ready to be written back over UDP.
func (d *DNSServer) buildDNSResponse(addr *net.UDPAddr, authority dns.RR, dnsMsg *dns.Msg,
	wantedSF wire.ServiceFlag, includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID, atype string) ([]byte, error) {
	respMsg := dnsMsg.Copy()
	respMsg.Authoritative = true
	respMsg.Response = true
	qtype := dnsMsg.Question[0].Qtype
	if qtype != dns.TypeNS {
		respMsg.Ns = append(respMsg.Ns, authority)
		// One answer record (TTL 30s) per matching good address.
		addrs := amgr.GoodAddresses(qtype, wantedSF, includeAllSubnetworks, subnetworkID)
		for _, a := range addrs {
			rr := fmt.Sprintf("%s 30 IN %s %s", dnsMsg.Question[0].Name, atype, a.IP.String())
			newRR, err := dns.NewRR(rr)
			if err != nil {
				log.Infof("%s: NewRR: %v", addr, err)
				return nil, err
			}
			respMsg.Answer = append(respMsg.Answer, newRR)
		}
	} else {
		// NS query: answer with our own nameserver record (TTL 1 day).
		rr := fmt.Sprintf("%s 86400 IN NS %s", dnsMsg.Question[0].Name, d.nameserver)
		newRR, err := dns.NewRR(rr)
		if err != nil {
			log.Infof("%s: NewRR: %v", addr, err)
			return nil, err
		}
		respMsg.Answer = append(respMsg.Answer, newRR)
	}
	sendBytes, err := respMsg.Pack()
	if err != nil {
		log.Infof("%s: failed to pack response: %v", addr, err)
		return nil, err
	}
	return sendBytes, nil
}
// handleDNSRequest services one DNS query end-to-end: validate, extract the
// requested services/subnetwork, build the response, and write it back.
// Runs as its own goroutine; the caller increments wg before spawning.
func (d *DNSServer) handleDNSRequest(addr *net.UDPAddr, authority dns.RR, udpListen *net.UDPConn, b []byte) {
	defer wg.Done()
	msg, domain, recordType, err := d.validateDNSRequest(addr, b)
	if err != nil {
		return // already logged by validateDNSRequest
	}
	services, subnetworkID, allSubnetworks, err := d.extractServicesSubnetworkID(addr, domain)
	if err != nil {
		return
	}
	log.Infof("%s: query %d for services %v, subnetwork ID %v",
		addr, msg.Question[0].Qtype, services, subnetworkID)
	response, err := d.buildDNSResponse(addr, authority, msg, services, allSubnetworks, subnetworkID, recordType)
	if err != nil {
		return
	}
	if _, err = udpListen.WriteToUDP(response, addr); err != nil {
		log.Infof("%s: failed to write response: %v", addr, err)
		return
	}
}

View File

@@ -1,218 +0,0 @@
// Copyright (c) 2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"net"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/connmgr"
"github.com/kaspanet/kaspad/peer"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/wire"
)
const (
	// nodeTimeout defines the timeout time waiting for
	// a response from a node.
	nodeTimeout = time.Second * 3
	// requiredServices describes the default services that are
	// required to be supported by outbound peers.
	requiredServices = wire.SFNodeNetwork
)
var (
	// amgr is the process-wide address manager, created in main.
	amgr *Manager
	// wg tracks the crawler and DNS-server goroutines for shutdown.
	wg sync.WaitGroup
	// peersDefaultPort is the active network's default peer port, parsed in main.
	peersDefaultPort int
	// systemShutdown is set to non-zero (atomically) to request shutdown.
	systemShutdown int32
)
// hostLookup resolves the passed host to a list of IP addresses using the
// system DNS resolver.
//
// NOTE(review): the previous comment described onion-address and proxy-aware
// resolution, but the implementation unconditionally calls net.LookupIP;
// the comment was rewritten to match the code.
func hostLookup(host string) ([]net.IP, error) {
	return net.LookupIP(host)
}
// creep is the crawler goroutine: it repeatedly takes stale addresses from
// the address manager, connects to each as an outbound kaspad peer, marks
// responsive peers as good, and harvests the addresses they advertise. When
// the manager is empty it seeds from DNS. It exits when systemShutdown is
// raised. Must be run with wg already incremented.
func creep() {
	defer wg.Done()
	// Unbuffered channels used by the peer listeners to signal the per-peer
	// goroutines below.
	onAddr := make(chan struct{})
	onVersion := make(chan struct{})
	cfg := peer.Config{
		UserAgentName:    "daglabs-sniffer",
		UserAgentVersion: "0.0.1",
		DAGParams:        ActiveConfig().NetParams(),
		DisableRelayTx:   true,
		SelectedTip:      func() *daghash.Hash { return ActiveConfig().NetParams().GenesisBlock.BlockHash() },
		Listeners: peer.MessageListeners{
			OnAddr: func(p *peer.Peer, msg *wire.MsgAddr) {
				added := amgr.AddAddresses(msg.AddrList)
				log.Infof("Peer %v sent %v addresses, %d new",
					p.Addr(), len(msg.AddrList), added)
				onAddr <- struct{}{}
			},
			OnVersion: func(p *peer.Peer, msg *wire.MsgVersion) {
				log.Infof("Adding peer %v with services %v and subnetword ID %v",
					p.NA().IP.String(), msg.Services, msg.SubnetworkID)
				// Mark this peer as a good node.
				amgr.Good(p.NA().IP, msg.Services, msg.SubnetworkID)
				// Ask peer for some addresses.
				p.QueueMessage(wire.NewMsgGetAddr(true, nil), nil)
				// notify that version is received and Peer's subnetwork ID is updated
				onVersion <- struct{}{}
			},
		},
	}
	var wgCreep sync.WaitGroup
	for {
		peers := amgr.Addresses()
		if len(peers) == 0 && amgr.AddressCount() == 0 {
			// Add peers discovered through DNS to the address manager.
			connmgr.SeedFromDNS(ActiveConfig().NetParams(), requiredServices, true, nil, hostLookup, func(addrs []*wire.NetAddress) {
				amgr.AddAddresses(addrs)
			})
			peers = amgr.Addresses()
		}
		if len(peers) == 0 {
			log.Infof("No stale addresses -- sleeping for 10 minutes")
			// Sleep in one-second slices so shutdown is noticed promptly.
			for i := 0; i < 600; i++ {
				time.Sleep(time.Second)
				if atomic.LoadInt32(&systemShutdown) != 0 {
					log.Infof("Creep thread shutdown")
					return
				}
			}
			continue
		}
		for _, addr := range peers {
			if atomic.LoadInt32(&systemShutdown) != 0 {
				log.Infof("Waiting creep threads to terminate")
				wgCreep.Wait()
				log.Infof("Creep thread shutdown")
				return
			}
			wgCreep.Add(1)
			go func(addr *wire.NetAddress) {
				defer wgCreep.Done()
				host := net.JoinHostPort(addr.IP.String(), strconv.Itoa(int(addr.Port)))
				p, err := peer.NewOutboundPeer(&cfg, host)
				if err != nil {
					log.Warnf("NewOutboundPeer on %v: %v",
						host, err)
					return
				}
				// Record the attempt before dialing so the address is not
				// handed out again while this connection is in flight.
				amgr.Attempt(addr.IP)
				conn, err := net.DialTimeout("tcp", p.Addr(), nodeTimeout)
				if err != nil {
					log.Warnf("%v", err)
					return
				}
				p.AssociateConnection(conn)
				// Wait for the version message or time out on failure.
				select {
				case <-onVersion:
				case <-time.After(nodeTimeout):
					log.Warnf("version timeout on peer %v",
						p.Addr())
					p.Disconnect()
					return
				}
				// Wait for the addr reply to our getaddr, or time out.
				select {
				case <-onAddr:
				case <-time.After(nodeTimeout):
					log.Warnf("getaddr timeout on peer %v",
						p.Addr())
					p.Disconnect()
					return
				}
				p.Disconnect()
			}(addr)
		}
		wgCreep.Wait()
	}
}
// main loads the configuration, restores the address manager from disk,
// optionally seeds it with a configured peer, then starts the crawler and
// the DNS server and blocks until an OS interrupt is received, after which
// it shuts everything down gracefully.
func main() {
	defer panics.HandlePanic(log, nil, nil)
	cfg, err := loadConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "loadConfig: %v\n", err)
		os.Exit(1)
	}
	amgr, err = NewManager(defaultHomeDir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "NewManager: %v\n", err)
		os.Exit(1)
	}
	peersDefaultPort, err = strconv.Atoi(ActiveConfig().NetParams().DefaultPort)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Invalid peers default port %s: %v\n", ActiveConfig().NetParams().DefaultPort, err)
		os.Exit(1)
	}
	// Seed the manager with the explicitly configured peer, if any. The
	// seeder value may be an IP or a hostname to resolve.
	if len(cfg.Seeder) != 0 {
		ip := net.ParseIP(cfg.Seeder)
		if ip == nil {
			hostAddrs, err := net.LookupHost(cfg.Seeder)
			if err != nil {
				log.Warnf("Failed to resolve seed host: %v, %v, ignoring", cfg.Seeder, err)
			} else {
				ip = net.ParseIP(hostAddrs[0])
				if ip == nil {
					log.Warnf("Failed to resolve seed host: %v, ignoring", cfg.Seeder)
				}
			}
		}
		if ip != nil {
			amgr.AddAddresses([]*wire.NetAddress{
				wire.NewNetAddressIPPort(ip, uint16(peersDefaultPort),
					requiredServices)})
		}
	}
	wg.Add(1)
	spawn(creep)
	dnsServer := NewDNSServer(cfg.Host, cfg.Nameserver, cfg.Listen)
	wg.Add(1)
	spawn(dnsServer.Start)
	defer func() {
		log.Infof("Gracefully shutting down the seeder...")
		atomic.StoreInt32(&systemShutdown, 1)
		close(amgr.quit)
		wg.Wait()
		amgr.wg.Wait()
		log.Infof("Seeder shutdown complete")
	}()
	// Wait until the interrupt signal is received from an OS signal or
	// shutdown is requested through one of the subsystems such as the RPC
	// server.
	interrupt := signal.InterruptListener()
	<-interrupt
}

View File

@@ -1,28 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
# Place sources at the import path expected by the Go toolchain (GOPATH mode).
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad
RUN apk add --no-cache curl git
# Copy module files first so dependency download is cached across code changes.
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
# Build a static binary (CGO disabled) so it runs on plain alpine.
RUN cd dnsseeder && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o dnsseeder .
# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app
# tini acts as PID 1 and forwards signals to the seeder.
RUN apk add --no-cache tini
COPY --from=build /go/src/github.com/kaspanet/kaspad/dnsseeder/ /app/
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/dnsseeder"]

View File

@@ -1,9 +0,0 @@
1. While the kaspad repository is still private, copy or check out
https://github.com/kaspanet/kaspad into vendor/github.com/kaspanet/kaspad before
running "docker build".
2. To build docker image invoke following command from dnsseeder directory:
docker build -t dnsseeder -f ./docker/Dockerfile .
3. To run
sudo docker run -u root -p 53:53/udp dnsseeder

View File

@@ -1,27 +0,0 @@
package main
import (
"fmt"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/util/panics"
"os"
)
var (
	// backendLog is the shared logging backend for the seeder.
	backendLog = logs.NewBackend()
	// log is the seeder's logger, tagged with the "SEED" subsystem.
	log = backendLog.Logger("SEED")
	// spawn wraps goroutine creation so panics are routed through the logger.
	spawn = panics.GoroutineWrapperFunc(log)
)
// initLog attaches two rotating log files to the package log backend:
// logFile receives everything from trace level up, errLogFile receives
// warnings and above. The process exits when either file cannot be added.
func initLog(logFile, errLogFile string) {
	err := backendLog.AddLogFile(logFile, logs.LevelTrace)
	if err != nil {
		// Terminate messages with a newline so they are not glued to the
		// next line of stderr output (the original format string lacked it).
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s\n", logFile, logs.LevelTrace, err)
		os.Exit(1)
	}
	err = backendLog.AddLogFile(errLogFile, logs.LevelWarn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s\n", errLogFile, logs.LevelWarn, err)
		os.Exit(1)
	}
}

View File

@@ -1,376 +0,0 @@
// Copyright (c) 2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/json"
"github.com/pkg/errors"
"net"
"os"
"path/filepath"
"sync"
"time"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/kaspanet/kaspad/wire"
"github.com/miekg/dns"
)
// Node represents a node in the Kaspa network as tracked by the seeder.
type Node struct {
	Addr         *wire.NetAddress            // network address of the node
	Services     wire.ServiceFlag            // services advertised in the node's version message
	LastAttempt  time.Time                   // last time a connection was attempted
	LastSuccess  time.Time                   // last time a handshake completed successfully
	LastSeen     time.Time                   // last time the address was advertised by any peer
	SubnetworkID *subnetworkid.SubnetworkID  // subnetwork from the version message; nil until known
}
// Manager is dnsseeder's main worker-type, storing all information required
// for operation
type Manager struct {
	mtx sync.RWMutex // guards nodes

	nodes     map[string]*Node // known nodes, keyed by IP string
	wg        sync.WaitGroup   // tracks the addressHandler goroutine
	quit      chan struct{}    // closed to stop addressHandler
	peersFile string           // path of the persisted nodes.json
}
const (
	// defaultMaxAddresses is the maximum number of addresses to return.
	defaultMaxAddresses = 16
	// defaultStaleTimeout is the time in which a host is considered
	// stale.
	defaultStaleTimeout = time.Hour
	// dumpAddressInterval is the interval used to dump the address
	// cache to disk for future use.
	dumpAddressInterval = time.Second * 30
	// peersFilename is the name of the file in which known nodes are
	// persisted between runs.
	peersFilename = "nodes.json"
	// pruneAddressInterval is the interval used to run the address
	// pruner.
	pruneAddressInterval = time.Minute * 1
	// pruneExpireTimeout is the expire time in which a node is
	// considered dead.
	pruneExpireTimeout = time.Hour * 8
)
var (
	// rfc1918Nets specifies the IPv4 private address blocks as defined
	// by RFC1918 (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16).
	rfc1918Nets = []net.IPNet{
		ipNet("10.0.0.0", 8, 32),
		ipNet("172.16.0.0", 12, 32),
		ipNet("192.168.0.0", 16, 32),
	}
	// rfc3964Net specifies the IPv6 to IPv4 encapsulation address block as
	// defined by RFC3964 (2002::/16).
	rfc3964Net = ipNet("2002::", 16, 128)
	// rfc4380Net specifies the IPv6 teredo tunneling over UDP address block
	// as defined by RFC4380 (2001::/32).
	rfc4380Net = ipNet("2001::", 32, 128)
	// rfc4843Net specifies the IPv6 ORCHID address block as defined by
	// RFC4843 (2001:10::/28).
	rfc4843Net = ipNet("2001:10::", 28, 128)
	// rfc4862Net specifies the IPv6 stateless address autoconfiguration
	// address block as defined by RFC4862 (FE80::/64).
	rfc4862Net = ipNet("FE80::", 64, 128)
	// rfc4193Net specifies the IPv6 unique local address block as defined
	// by RFC4193 (FC00::/7).
	rfc4193Net = ipNet("FC00::", 7, 128)
)
// ipNet returns a net.IPNet struct given the passed IP address string, number
// of one bits to include at the start of the mask, and the total number of bits
// for the mask.
func ipNet(ip string, ones, bits int) net.IPNet {
return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
}
// isRoutable reports whether addr is publicly routable. Private (RFC1918)
// and special-purpose IPv6 blocks are rejected, unless the active network
// explicitly accepts unroutable addresses.
func isRoutable(addr net.IP) bool {
	if ActiveConfig().NetParams().AcceptUnroutable {
		return true
	}
	for _, privateNet := range rfc1918Nets {
		if privateNet.Contains(addr) {
			return false
		}
	}
	for _, reservedNet := range []net.IPNet{rfc3964Net, rfc4380Net, rfc4843Net, rfc4862Net, rfc4193Net} {
		if reservedNet.Contains(addr) {
			return false
		}
	}
	return true
}
// NewManager constructs and returns a new dnsseeder manager, with the provided dataDir.
// It restores previously-known nodes from the peers file (discarding the file
// if it cannot be parsed) and starts the background addressHandler goroutine.
func NewManager(dataDir string) (*Manager, error) {
	amgr := Manager{
		nodes:     make(map[string]*Node),
		peersFile: filepath.Join(dataDir, peersFilename),
		quit:      make(chan struct{}),
	}
	err := amgr.deserializePeers()
	if err != nil {
		log.Warnf("Failed to parse file %s: %v", amgr.peersFile, err)
		// if it is invalid we nuke the old one unconditionally.
		err = os.Remove(amgr.peersFile)
		if err != nil {
			log.Warnf("Failed to remove corrupt peers file %s: %v",
				amgr.peersFile, err)
		}
	}
	amgr.wg.Add(1)
	spawn(amgr.addressHandler)
	return &amgr, nil
}
// AddAddresses merges addrs into the manager. Unroutable addresses are
// skipped, already-known addresses only get their LastSeen refreshed, and
// the returned count is the number of genuinely new nodes added.
func (m *Manager) AddAddresses(addrs []*wire.NetAddress) int {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	added := 0
	for _, addr := range addrs {
		if !isRoutable(addr.IP) {
			continue
		}
		key := addr.IP.String()
		if existing, ok := m.nodes[key]; ok {
			existing.LastSeen = time.Now()
			continue
		}
		m.nodes[key] = &Node{
			Addr:     addr,
			LastSeen: time.Now(),
		}
		added++
	}
	return added
}
// Addresses returns up to defaultMaxAddresses node addresses that are due to
// be tested again — i.e. neither attempted nor successfully contacted within
// the stale window.
func (m *Manager) Addresses() []*wire.NetAddress {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	now := time.Now()
	addrs := make([]*wire.NetAddress, 0, defaultMaxAddresses*8)
	remaining := defaultMaxAddresses
	for _, node := range m.nodes {
		if remaining == 0 {
			break
		}
		recentSuccess := now.Sub(node.LastSuccess) < defaultStaleTimeout
		recentAttempt := now.Sub(node.LastAttempt) < defaultStaleTimeout
		if recentSuccess || recentAttempt {
			continue
		}
		addrs = append(addrs, node.Addr)
		remaining--
	}
	return addrs
}
// AddressCount returns the number of known nodes.
func (m *Manager) AddressCount() int {
	// Guard the map read: m.nodes is mutated concurrently by AddAddresses,
	// prunePeers and deserializePeers, so an unsynchronized len() here was
	// a data race.
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	return len(m.nodes)
}
// GoodAddresses returns good working IPs that match both the
// passed DNS query type and have the requested services. At most
// defaultMaxAddresses entries are returned; non-address query types yield
// an empty slice.
func (m *Manager) GoodAddresses(qtype uint16, services wire.ServiceFlag, includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) []*wire.NetAddress {
	addrs := make([]*wire.NetAddress, 0, defaultMaxAddresses)
	i := defaultMaxAddresses
	// Only A/AAAA queries can be answered with peer addresses.
	if qtype != dns.TypeA && qtype != dns.TypeAAAA {
		return addrs
	}
	now := time.Now()
	m.mtx.RLock()
	for _, node := range m.nodes {
		if i == 0 {
			break
		}
		// Only hand out nodes listening on the network's default port.
		if node.Addr.Port != uint16(peersDefaultPort) {
			continue
		}
		// NOTE(review): node.SubnetworkID may still be nil for nodes that
		// never completed a handshake; this assumes IsEqual tolerates a nil
		// receiver — TODO confirm.
		if !includeAllSubnetworks && !node.SubnetworkID.IsEqual(subnetworkID) {
			continue
		}
		// Match address family to the query type.
		if qtype == dns.TypeA && node.Addr.IP.To4() == nil {
			continue
		} else if qtype == dns.TypeAAAA && node.Addr.IP.To4() != nil {
			continue
		}
		// Only nodes with a recent successful handshake are "good".
		if node.LastSuccess.IsZero() ||
			now.Sub(node.LastSuccess) > defaultStaleTimeout {
			continue
		}
		// Does the node have the requested services?
		if node.Services&services != services {
			continue
		}
		addrs = append(addrs, node.Addr)
		i--
	}
	m.mtx.RUnlock()
	return addrs
}
// Attempt records that a connection to ip was just attempted by stamping
// the node's LastAttempt with the current time. Unknown IPs are ignored.
func (m *Manager) Attempt(ip net.IP) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	if node, ok := m.nodes[ip.String()]; ok {
		node.LastAttempt = time.Now()
	}
}
// Good marks ip as a working node: it records the advertised services and
// subnetwork ID and stamps LastSuccess with the current time. Unknown IPs
// are ignored.
func (m *Manager) Good(ip net.IP, services wire.ServiceFlag, subnetworkid *subnetworkid.SubnetworkID) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	node, ok := m.nodes[ip.String()]
	if !ok {
		return
	}
	node.Services = services
	node.LastSuccess = time.Now()
	node.SubnetworkID = subnetworkid
}
// addressHandler is the main handler for the address manager. It must be run
// as a goroutine. It periodically persists the node map to disk and prunes
// dead entries until m.quit is closed, saving one final snapshot on exit.
func (m *Manager) addressHandler() {
	defer m.wg.Done()
	pruneAddressTicker := time.NewTicker(pruneAddressInterval)
	defer pruneAddressTicker.Stop()
	dumpAddressTicker := time.NewTicker(dumpAddressInterval)
	defer dumpAddressTicker.Stop()
out:
	for {
		select {
		case <-dumpAddressTicker.C:
			m.savePeers()
		case <-pruneAddressTicker.C:
			m.prunePeers()
		case <-m.quit:
			break out
		}
	}
	log.Infof("Address manager: saving peers")
	m.savePeers()
	// Fixed typo in the shutdown log message ("shoutdown" -> "shutdown").
	log.Infof("Address manager shutdown")
}
// prunePeers deletes nodes that have expired: either not advertised by any
// peer within pruneExpireTimeout, or whose last successful contact is older
// than pruneExpireTimeout. Logs how many entries were removed.
func (m *Manager) prunePeers() {
	now := time.Now()
	pruned := 0
	m.mtx.Lock()
	for key, node := range m.nodes {
		staleSeen := now.Sub(node.LastSeen) > pruneExpireTimeout
		staleSuccess := !node.LastSuccess.IsZero() &&
			now.Sub(node.LastSuccess) > pruneExpireTimeout
		if staleSeen || staleSuccess {
			delete(m.nodes, key)
			pruned++
		}
	}
	remaining := len(m.nodes)
	m.mtx.Unlock()
	log.Infof("Pruned %d addresses: %d remaining", pruned, remaining)
}
// deserializePeers restores the node map from the JSON peers file. A missing
// file is not an error (the manager simply starts empty); a present but
// unreadable or malformed file is reported to the caller.
func (m *Manager) deserializePeers() error {
	filePath := m.peersFile
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		return nil
	}
	r, err := os.Open(filePath)
	if err != nil {
		return errors.Errorf("%s error opening file: %v", filePath, err)
	}
	defer r.Close()
	var nodes map[string]*Node
	if err := json.NewDecoder(r).Decode(&nodes); err != nil {
		return errors.Errorf("error reading %s: %v", filePath, err)
	}
	m.mtx.Lock()
	m.nodes = nodes
	m.mtx.Unlock()
	log.Infof("%d nodes loaded", len(nodes))
	return nil
}
// savePeers persists the node map as JSON. It writes a temporary file next
// to the real one and renames it into place, so a crash mid-write never
// corrupts the existing peers file. Failures are logged, not returned.
func (m *Manager) savePeers() {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	// Write temporary peers file and then move it into place.
	tmpfile := m.peersFile + ".new"
	w, err := os.Create(tmpfile)
	if err != nil {
		log.Errorf("Error opening file %s: %v", tmpfile, err)
		return
	}
	enc := json.NewEncoder(w)
	if err := enc.Encode(&m.nodes); err != nil {
		log.Errorf("Failed to encode file %s: %v", tmpfile, err)
		// Close the handle on the error path; the original code returned
		// here without closing, leaking the file descriptor.
		w.Close()
		return
	}
	if err := w.Close(); err != nil {
		log.Errorf("Error closing file %s: %v", tmpfile, err)
		return
	}
	if err := os.Rename(tmpfile, m.peersFile); err != nil {
		log.Errorf("Error writing file %s: %v", m.peersFile, err)
		return
	}
}

View File

@@ -1,121 +0,0 @@
package config
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/kasparov/logger"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"path/filepath"
)
const (
	// defaultLogFilename is the name of the main faucet log file.
	defaultLogFilename = "faucet.log"
	// defaultErrLogFilename is the name of the warning-and-above log file.
	defaultErrLogFilename = "faucet_err.log"
)
var (
	// Default configuration options
	defaultLogDir     = util.AppDataDir("faucet", false)
	defaultDBAddress  = "localhost:3306"
	defaultHTTPListen = "0.0.0.0:8081"
	// activeNetParams are the currently active net params
	activeNetParams *dagconfig.Params
)
// Config defines the configuration options for the API server.
type Config struct {
	LogDir       string  `long:"logdir" description:"Directory to log output."`
	HTTPListen   string  `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8081)"`
	KasparovdURL string  `long:"kasparovd-url" description:"The API server url to connect to"`
	PrivateKey   string  `long:"private-key" description:"Faucet Private key"`
	DBAddress    string  `long:"dbaddress" description:"Database address"`
	DBUser       string  `long:"dbuser" description:"Database user" required:"true"`
	DBPassword   string  `long:"dbpass" description:"Database password" required:"true"`
	DBName       string  `long:"dbname" description:"Database name" required:"true"`
	Migrate      bool    `long:"migrate" description:"Migrate the database to the latest version. The server will not start when using this flag."`
	FeeRate      float64 `long:"fee-rate" description:"Coins per gram fee rate"`
	TestNet      bool    `long:"testnet" description:"Connect to testnet"`
	SimNet       bool    `long:"simnet" description:"Connect to the simulation test network"`
	DevNet       bool    `long:"devnet" description:"Connect to the development test network"`
}
// cfg holds the package-wide parsed configuration; nil until Parse succeeds.
var cfg *Config
// Parse parses the CLI arguments and returns a config struct.
// It populates the package-level cfg, validates flag combinations, resolves
// the active network and initializes logging. Returns an error when parsing
// or validation fails.
func Parse() error {
	cfg = &Config{
		LogDir:     defaultLogDir,
		DBAddress:  defaultDBAddress,
		HTTPListen: defaultHTTPListen,
	}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return err
	}
	// These flags are only required for normal operation; a pure migration
	// run does not need them.
	if !cfg.Migrate {
		if cfg.KasparovdURL == "" {
			return errors.New("api-server-url argument is required when --migrate flag is not raised")
		}
		if cfg.PrivateKey == "" {
			return errors.New("private-key argument is required when --migrate flag is not raised")
		}
	}
	err = resolveNetwork(cfg)
	if err != nil {
		return err
	}
	logFile := filepath.Join(cfg.LogDir, defaultLogFilename)
	errLogFile := filepath.Join(cfg.LogDir, defaultErrLogFilename)
	logger.InitLog(logFile, errLogFile)
	return nil
}
// resolveNetwork selects the active network parameters from the raised
// network flags. At most one of testnet/simnet/devnet may be selected;
// when none is, mainnet is used.
func resolveNetwork(cfg *Config) error {
	// Multiple networks can't be selected simultaneously.
	selectedNets := 0
	for _, isActive := range []bool{cfg.TestNet, cfg.SimNet, cfg.DevNet} {
		if isActive {
			selectedNets++
		}
	}
	if selectedNets > 1 {
		return errors.New("multiple net params (testnet, simnet, devnet, etc.) can't be used " +
			"together -- choose one of them")
	}
	switch {
	case cfg.TestNet:
		activeNetParams = &dagconfig.TestNetParams
	case cfg.SimNet:
		activeNetParams = &dagconfig.SimNetParams
	case cfg.DevNet:
		activeNetParams = &dagconfig.DevNetParams
	default:
		activeNetParams = &dagconfig.MainNetParams
	}
	return nil
}
// MainConfig returns the parsed faucet configuration, or an error when
// Parse has not been called successfully yet.
func MainConfig() (*Config, error) {
	if cfg != nil {
		return cfg, nil
	}
	return nil, errors.New("No configuration was set for the faucet")
}
// ActiveNetParams returns the currently active net params, as resolved by
// resolveNetwork; nil before Parse has run.
func ActiveNetParams() *dagconfig.Params {
	return activeNetParams
}

View File

@@ -1,151 +0,0 @@
package database
import (
nativeerrors "errors"
"fmt"
"github.com/pkg/errors"
"os"
"github.com/golang-migrate/migrate/v4/source"
"github.com/jinzhu/gorm"
"github.com/kaspanet/kaspad/faucet/config"
"github.com/golang-migrate/migrate/v4"
)
// db is the API server database connection; nil until Connect succeeds.
var db *gorm.DB
// DB returns the shared database connection, or an error when Connect has
// not been called successfully yet.
func DB() (*gorm.DB, error) {
	if db != nil {
		return db, nil
	}
	return nil, errors.New("Database is not connected")
}
// gormLogger adapts the package logger to gorm's logger interface.
type gormLogger struct{}

// Print forwards gorm log output to the package error log.
func (l gormLogger) Print(v ...interface{}) {
	str := fmt.Sprint(v...)
	// Pass the message as an argument, not as the format string: SQL text
	// routinely contains '%', which Errorf would misinterpret as verbs.
	log.Errorf("%s", str)
}
// Connect connects to the database mentioned in
// config variable. It refuses to connect when the schema is not at the
// latest migration version, instructing the operator to run --migrate.
func Connect() error {
	connectionString, err := buildConnectionString()
	if err != nil {
		return err
	}
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return errors.Errorf("Error checking whether the database is current: %s", err)
	}
	if !isCurrent {
		return errors.Errorf("Database is not current (version %d). Please migrate"+
			" the database by running the faucet with --migrate flag and then run it again.", version)
	}
	db, err = gorm.Open("mysql", connectionString)
	if err != nil {
		return err
	}
	// Route gorm's own logging through the package logger.
	db.SetLogger(gormLogger{})
	return nil
}
// Close closes the database connection and clears the package-level handle.
// Calling it when no connection exists is a no-op.
func Close() error {
	if db == nil {
		return nil
	}
	closeErr := db.Close()
	db = nil
	return closeErr
}
// buildConnectionString assembles the MySQL DSN from the faucet config.
func buildConnectionString() (string, error) {
	cfg, err := config.MainConfig()
	if err != nil {
		return "", err
	}
	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
		cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
	return dsn, nil
}
// isCurrent resolves whether the database is on the latest
// version of the schema. It returns the current version alongside the
// verdict; an un-migrated database reports (false, 0, nil).
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
	// Get the current version
	version, isDirty, err := migrator.Version()
	if nativeerrors.Is(err, migrate.ErrNilVersion) {
		return false, 0, nil
	}
	if err != nil {
		return false, 0, err
	}
	if isDirty {
		return false, 0, errors.Errorf("Database is dirty")
	}
	// The database is current if there is no migration after the current
	// version. errors.Is also matches a wrapped *os.PathError, unlike the
	// previous direct type assertion and pointer comparison.
	_, err = driver.Next(version)
	if nativeerrors.Is(err, os.ErrNotExist) {
		return true, version, nil
	}
	return false, version, err
}
// openMigrator opens the file-based migration source in ./migrations and a
// migrate instance bound to the MySQL database at connectionString.
func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
	sourceDriver, err := source.Open("file://migrations")
	if err != nil {
		return nil, nil, err
	}
	migrator, err := migrate.NewWithSourceInstance(
		"migrations", sourceDriver, "mysql://"+connectionString)
	if err != nil {
		return nil, nil, err
	}
	return migrator, sourceDriver, nil
}
// Migrate database to the latest version. It is a no-op when the schema is
// already current, and fails if the database ends up dirty after migrating.
func Migrate() error {
	connectionString, err := buildConnectionString()
	if err != nil {
		return err
	}
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return errors.Errorf("Error checking whether the database is current: %s", err)
	}
	if isCurrent {
		log.Infof("Database is already up-to-date (version %d)", version)
		return nil
	}
	err = migrator.Up()
	if err != nil {
		return err
	}
	// Re-read the version after running the migrations (the := reuses the
	// existing version/err variables since isDirty is newly declared).
	version, isDirty, err := migrator.Version()
	if err != nil {
		return err
	}
	if isDirty {
		return errors.Errorf("error migrating database: database is dirty")
	}
	log.Infof("Migrated database to the latest version (version %d)", version)
	return nil
}

View File

@@ -1,9 +0,0 @@
package database
import "github.com/kaspanet/kaspad/util/panics"
import "github.com/kaspanet/kaspad/kasparov/logger"
var (
	// log is the database package's logger, tagged "DTBS".
	log = logger.BackendLog.Logger("DTBS")
	// spawn wraps goroutine creation so panics are routed through the logger.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1,28 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build
# Place sources at the import path expected by the Go toolchain (GOPATH mode).
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad
RUN apk add --no-cache curl git
# Copy module files first so dependency download is cached across code changes.
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
# Build a static binary (CGO disabled) so it runs on plain alpine.
RUN cd faucet && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o faucet .
# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app
# tini acts as PID 1 and forwards signals to the faucet.
RUN apk add --no-cache tini
COPY --from=build /go/src/github.com/kaspanet/kaspad/faucet /app/
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/faucet"]

View File

@@ -1,332 +0,0 @@
package main
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/faucet/config"
"github.com/kaspanet/kaspad/httpserverutils"
"github.com/kaspanet/kaspad/kasparov/kasparovd/apimodels"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
	// sendAmount is the amount paid out per faucet request.
	// NOTE(review): the unit (base currency units vs. something else) is not
	// visible here — confirm against the callers.
	sendAmount = 10000
	// Value 8 bytes + serialized varint size for the length of ScriptPubKey +
	// ScriptPubKey bytes.
	outputSize uint64 = 8 + 1 + 25
	// minTxFee is the minimum fee attached to faucet transactions.
	minTxFee uint64 = 3000
	// requiredConfirmations is how deep a non-coinbase UTXO must be before
	// the faucet will spend it.
	requiredConfirmations = 10
)
// utxoSet maps an outpoint to its unspent-output entry.
type utxoSet map[wire.Outpoint]*blockdag.UTXOEntry
// apiURL joins requestPath onto the configured Kasparov API server base URL
// and returns the resulting absolute URL.
func apiURL(requestPath string) (string, error) {
	cfg, err := config.MainConfig()
	if err != nil {
		return "", err
	}
	base, err := url.Parse(cfg.KasparovdURL)
	if err != nil {
		return "", errors.WithStack(err)
	}
	base.Path = path.Join(base.Path, requestPath)
	return base.String(), nil
}
// getFromAPIServer makes an HTTP GET request to the API server
// to the given request path, and returns the response body. Non-200
// responses are decoded as a ClientError and returned as an error.
func getFromAPIServer(requestPath string) ([]byte, error) {
	getAPIURL, err := apiURL(requestPath)
	if err != nil {
		return nil, err
	}
	resp, err := http.Get(getAPIURL)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	defer func() {
		// NOTE(review): a Body.Close failure panics here by design in this
		// codebase; the panic is expected to be caught by panics.HandlePanic.
		// Confirm this is still the intended policy.
		err := resp.Body.Close()
		if err != nil {
			panic(errors.WithStack(err))
		}
	}()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	if resp.StatusCode != http.StatusOK {
		// The server reports failures as a JSON-encoded ClientError.
		clientError := &httpserverutils.ClientError{}
		err := json.Unmarshal(body, &clientError)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		return nil, errors.WithStack(clientError)
	}
	return body, nil
}
// postToAPIServer makes an HTTP POST request to the API server
// to the given request path. It converts the given data to JSON,
// and posts it as the request body. (The previous comment misnamed this
// function "getFromAPIServer".)
func postToAPIServer(requestPath string, data interface{}) error {
	dataBytes, err := json.Marshal(data)
	if err != nil {
		return errors.WithStack(err)
	}
	r := bytes.NewReader(dataBytes)
	postAPIURL, err := apiURL(requestPath)
	if err != nil {
		return err
	}
	resp, err := http.Post(postAPIURL, "application/json", r)
	if err != nil {
		return errors.WithStack(err)
	}
	defer func() {
		// NOTE(review): a Body.Close failure panics here by design in this
		// codebase; confirm this is still the intended policy.
		err := resp.Body.Close()
		if err != nil {
			panic(errors.WithStack(err))
		}
	}()
	if resp.StatusCode != http.StatusOK {
		// The server reports failures as a JSON-encoded ClientError.
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return errors.WithStack(err)
		}
		clientError := &httpserverutils.ClientError{}
		err = json.Unmarshal(body, &clientError)
		if err != nil {
			return errors.WithStack(err)
		}
		return errors.WithStack(clientError)
	}
	return nil
}
// isUTXOMatured reports whether a UTXO with the given confirmation count is
// spendable by the faucet: coinbase outputs must reach the network's
// coinbase maturity, anything else needs requiredConfirmations.
func isUTXOMatured(entry *blockdag.UTXOEntry, confirmations uint64) bool {
	required := uint64(requiredConfirmations)
	if entry.IsCoinbase() {
		required = uint64(config.ActiveNetParams().BlockCoinbaseMaturity)
	}
	return confirmations >= required
}
// getWalletUTXOSet fetches the faucet address's UTXOs from the API server
// and returns them as a utxoSet, keeping only entries mature enough to
// spend (see isUTXOMatured).
func getWalletUTXOSet() (utxoSet, error) {
	body, err := getFromAPIServer(fmt.Sprintf("utxos/address/%s", faucetAddress.EncodeAddress()))
	if err != nil {
		return nil, err
	}
	utxoResponses := []*apimodels.TransactionOutputResponse{}
	err = json.Unmarshal(body, &utxoResponses)
	if err != nil {
		return nil, err
	}
	walletUTXOSet := make(utxoSet)
	for _, utxoResponse := range utxoResponses {
		// The script comes back hex-encoded from the API.
		scriptPubKey, err := hex.DecodeString(utxoResponse.ScriptPubKey)
		if err != nil {
			return nil, err
		}
		txOut := &wire.TxOut{
			Value:        utxoResponse.Value,
			ScriptPubKey: scriptPubKey,
		}
		txID, err := daghash.NewTxIDFromStr(utxoResponse.TransactionID)
		if err != nil {
			return nil, err
		}
		outpoint := wire.NewOutpoint(txID, utxoResponse.Index)
		utxoEntry := blockdag.NewUTXOEntry(txOut, *utxoResponse.IsCoinbase, utxoResponse.AcceptingBlockBlueScore)
		// Skip outputs that are not yet spendable.
		if !isUTXOMatured(utxoEntry, *utxoResponse.Confirmations) {
			continue
		}
		walletUTXOSet[*outpoint] = utxoEntry
	}
	return walletUTXOSet, nil
}
// sendToAddress builds and signs a faucet transaction paying the given
// address, posts its hex-encoded serialization to the API server, and
// returns the transaction.
func sendToAddress(address util.Address) (*wire.MsgTx, error) {
	tx, err := createTx(address)
	if err != nil {
		return nil, err
	}
	serialized := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
	if err := tx.Serialize(serialized); err != nil {
		return nil, err
	}
	payload := &apimodels.RawTransaction{
		RawTransaction: hex.EncodeToString(serialized.Bytes()),
	}
	return tx, postToAPIServer("transaction", payload)
}
// createTx assembles a fully-signed transaction that pays the given address
// out of the faucet wallet's matured UTXOs.
func createTx(address util.Address) (*wire.MsgTx, error) {
	walletUTXOSet, err := getWalletUTXOSet()
	if err != nil {
		return nil, err
	}
	unsignedTx, err := createUnsignedTx(walletUTXOSet, address)
	if err != nil {
		return nil, err
	}
	if err := signTx(walletUTXOSet, unsignedTx); err != nil {
		return nil, err
	}
	return unsignedTx, nil
}
// createUnsignedTx creates an unsigned transaction sending sendAmount to the
// given address, funding it from walletUTXOSet. When a change output is
// profitable (see fundTx), the change is paid back to the faucet.
func createUnsignedTx(walletUTXOSet utxoSet, address util.Address) (*wire.MsgTx, error) {
	tx := wire.NewNativeMsgTx(wire.TxVersion, nil, nil)
	netAmount, isChangeOutputRequired, err := fundTx(walletUTXOSet, tx, sendAmount)
	if err != nil {
		return nil, err
	}
	// BUG FIX: address.ScriptAddress() returns the raw pubkey hash, not a
	// spendable script, so outputs built from it were unspendable. Build a
	// proper scriptPubKey for the recipient instead (same as main() does for
	// faucetScriptPubKey).
	recipientScriptPubKey, err := txscript.PayToAddrScript(address)
	if err != nil {
		return nil, err
	}
	if isChangeOutputRequired {
		tx.AddTxOut(&wire.TxOut{
			Value:        sendAmount,
			ScriptPubKey: recipientScriptPubKey,
		})
		// Change goes back to the faucet's own script.
		tx.AddTxOut(&wire.TxOut{
			Value:        netAmount - sendAmount,
			ScriptPubKey: faucetScriptPubKey,
		})
		return tx, nil
	}
	tx.AddTxOut(&wire.TxOut{
		Value:        netAmount,
		ScriptPubKey: recipientScriptPubKey,
	})
	return tx, nil
}
// signTx signs every input of tx with the faucet's private key, looking up
// each input's scriptPubKey in walletUTXOSet.
//
// Returns an error if an input's previous outpoint is missing from
// walletUTXOSet or if signature generation fails.
func signTx(walletUTXOSet utxoSet, tx *wire.MsgTx) error {
	for i, txIn := range tx.TxIn {
		outpoint := txIn.PreviousOutpoint
		// ROBUSTNESS FIX: previously a missing entry caused a nil-pointer
		// dereference; fail with an explicit error instead.
		entry, ok := walletUTXOSet[outpoint]
		if !ok {
			return errors.Errorf("missing UTXO entry for outpoint %v", outpoint)
		}
		sigScript, err := txscript.SignatureScript(tx, i, entry.ScriptPubKey(),
			txscript.SigHashAll, faucetPrivateKey, true)
		if err != nil {
			// BUG FIX: errors.Errorf discarded the original error's stack/cause;
			// wrap it instead.
			return errors.Wrap(err, "failed to sign transaction")
		}
		txIn.SignatureScript = sigScript
	}
	return nil
}
// fundTx greedily adds inputs from walletUTXOSet to tx until the selected
// total covers amount plus the required fee.
//
// Returns the netAmount that will actually reach the recipient (selected
// amount minus fee) and whether adding a change output is profitable
// (see isFundedAndIsChangeOutputRequired). Fails if the whole wallet
// cannot cover the target amount.
func fundTx(walletUTXOSet utxoSet, tx *wire.MsgTx, amount uint64) (netAmount uint64, isChangeOutputRequired bool, err error) {
	amountSelected := uint64(0)
	isTxFunded := false
	for outpoint, entry := range walletUTXOSet {
		amountSelected += entry.Amount()
		// Add the selected output to the transaction.
		// NOTE(review): &outpoint takes the address of the range variable; this
		// is safe only if wire.NewTxIn copies the pointed-to value — confirm
		// against the wire package.
		tx.AddTxIn(wire.NewTxIn(&outpoint, nil))
		// Check if transaction has enough funds. If we don't have enough
		// coins from the current amount selected to pay the fee continue
		// to grab more coins.
		isTxFunded, isChangeOutputRequired, netAmount, err = isFundedAndIsChangeOutputRequired(tx, amountSelected, amount, walletUTXOSet)
		if err != nil {
			return 0, false, err
		}
		if isTxFunded {
			break
		}
	}
	if !isTxFunded {
		return 0, false, errors.Errorf("not enough funds for coin selection")
	}
	return netAmount, isChangeOutputRequired, nil
}
// isFundedAndIsChangeOutputRequired returns three values and an error:
// * isTxFunded is whether the transaction inputs cover the target amount + the required fee.
// * isChangeOutputRequired is whether it is profitable to add an additional change
// output to the transaction.
// * netAmount is the amount of coins that will be eventually sent to the recipient. If no
// change output is needed, the netAmount will be usually a little bit higher than the
// targetAmount. Otherwise, it'll be the same as the targetAmount.
func isFundedAndIsChangeOutputRequired(tx *wire.MsgTx, amountSelected uint64, targetAmount uint64, walletUTXOSet utxoSet) (isTxFunded, isChangeOutputRequired bool, netAmount uint64, err error) {
	// First check if it can be funded with one output and the required fee for it.
	isFundedWithOneOutput, oneOutputFee, err := isFundedWithNumberOfOutputs(tx, 1, amountSelected, targetAmount, walletUTXOSet)
	if err != nil {
		return false, false, 0, err
	}
	if !isFundedWithOneOutput {
		return false, false, 0, nil
	}
	// Now check if it can be funded with two outputs and the required fee for it.
	isFundedWithTwoOutputs, twoOutputsFee, err := isFundedWithNumberOfOutputs(tx, 2, amountSelected, targetAmount, walletUTXOSet)
	if err != nil {
		return false, false, 0, err
	}
	// If it can be funded with two outputs, check if adding a change output is
	// worth it: the amount saved by not sending the recipient the whole inputs
	// amount (minus fees) must exceed the additional fee a change output costs.
	//
	// BUG FIX: the saving was previously computed as targetAmount-amountSelected,
	// which underflows uint64 (once funded, amountSelected >= targetAmount) and
	// made this condition almost always true. The saving is
	// (amountSelected - oneOutputFee) - targetAmount, which cannot underflow
	// here because isFundedWithOneOutput guarantees
	// amountSelected - oneOutputFee >= targetAmount.
	if isFundedWithTwoOutputs && twoOutputsFee-oneOutputFee < (amountSelected-oneOutputFee)-targetAmount {
		return true, true, amountSelected - twoOutputsFee, nil
	}
	return true, false, amountSelected - oneOutputFee, nil
}
// isFundedWithNumberOfOutputs reports whether the inputs selected so far
// cover targetAmount plus the fee the transaction would require if it had
// numberOfOutputs outputs; the computed fee is returned as well.
func isFundedWithNumberOfOutputs(tx *wire.MsgTx, numberOfOutputs uint64, amountSelected uint64, targetAmount uint64, walletUTXOSet utxoSet) (isTxFunded bool, fee uint64, err error) {
	requiredFee, err := calcFee(tx, numberOfOutputs, walletUTXOSet)
	if err != nil {
		return false, 0, err
	}
	isFunded := amountSelected > requiredFee && amountSelected-requiredFee >= targetAmount
	return isFunded, requiredFee, nil
}
// calcFee estimates the fee for msgTx assuming numberOfOutputs outputs will
// be attached, based on the configured fee rate; never below minTxFee.
func calcFee(msgTx *wire.MsgTx, numberOfOutputs uint64, walletUTXOSet utxoSet) (uint64, error) {
	cfg, err := config.MainConfig()
	if err != nil {
		return 0, err
	}
	// Mass of the transaction as-is plus the mass the planned outputs add.
	massWithOutputs := calcTxMass(msgTx, walletUTXOSet) + outputsTotalSize(numberOfOutputs)*blockdag.MassPerTxByte
	fee := uint64(float64(massWithOutputs) * cfg.FeeRate)
	if fee < minTxFee {
		fee = minTxFee
	}
	return fee, nil
}
func outputsTotalSize(numberOfOutputs uint64) uint64 {
return numberOfOutputs*outputSize + uint64(wire.VarIntSerializeSize(numberOfOutputs))
}
// calcTxMass computes the mass of msgTx given the scriptPubKeys of the
// outputs it spends.
func calcTxMass(msgTx *wire.MsgTx, walletUTXOSet utxoSet) uint64 {
	return blockdag.CalcTxMass(util.NewTx(msgTx), getPreviousScriptPubKeys(msgTx, walletUTXOSet))
}
// getPreviousScriptPubKeys collects, in input order, the scriptPubKey of
// each output that msgTx spends, as found in walletUTXOSet.
func getPreviousScriptPubKeys(msgTx *wire.MsgTx, walletUTXOSet utxoSet) [][]byte {
	scriptPubKeys := make([][]byte, 0, len(msgTx.TxIn))
	for _, txIn := range msgTx.TxIn {
		entry := walletUTXOSet[txIn.PreviousOutpoint]
		scriptPubKeys = append(scriptPubKeys, entry.ScriptPubKey())
	}
	return scriptPubKeys
}

View File

@@ -1,66 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/faucet/database"
"github.com/kaspanet/kaspad/httpserverutils"
"github.com/pkg/errors"
"net"
"net/http"
"time"
)
// minRequestInterval is the minimum time that must pass between two faucet
// requests from the same IP.
const minRequestInterval = time.Hour * 24

// ipUse is the gorm model recording the last time each IP used the faucet.
type ipUse struct {
	IP      string
	LastUse time.Time
}
func ipFromRequest(r *http.Request) (string, error) {
ip, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return "", err
}
return ip, nil
}
// validateIPUsage returns a 403 handler error if the requesting IP has
// already used the faucet within the last minRequestInterval; nil otherwise.
func validateIPUsage(r *http.Request) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	now := time.Now()
	timeBeforeMinRequestInterval := now.Add(-minRequestInterval)
	var count int
	ip, err := ipFromRequest(r)
	if err != nil {
		return err
	}
	// Count recent uses of this IP within the rate-limit window.
	dbResult := db.Model(&ipUse{}).Where(&ipUse{IP: ip}).Where("last_use BETWEEN ? AND ?", timeBeforeMinRequestInterval, now).Count(&count)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return httpserverutils.NewErrorFromDBErrors("Some errors were encountered when checking the last use of an IP:", dbResult.GetErrors())
	}
	if count != 0 {
		// BUG FIX: the user-facing message previously read "allowed to to have".
		return httpserverutils.NewHandlerError(http.StatusForbidden, errors.New("A user is allowed to have one request from the faucet every 24 hours"))
	}
	return nil
}
// updateIPUsage upserts the requesting IP's last-use timestamp to now,
// creating the row on first use.
func updateIPUsage(r *http.Request) error {
	db, err := database.DB()
	if err != nil {
		return err
	}
	ip, err := ipFromRequest(r)
	if err != nil {
		return err
	}
	// FirstOrCreate with Assign updates LastUse when the row exists and
	// inserts it otherwise.
	dbResult := db.Where(&ipUse{IP: ip}).Assign(&ipUse{LastUse: time.Now()}).FirstOrCreate(&ipUse{})
	dbErrors := dbResult.GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return httpserverutils.NewErrorFromDBErrors("Some errors were encountered when upserting the IP to the new date:", dbResult.GetErrors())
	}
	return nil
}

View File

@@ -1,11 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// log is the faucet's logger instance.
	log = logger.BackendLog.Logger("FAUC")
	// spawn launches goroutines that report panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1,88 +0,0 @@
package main
import (
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/ecc"
"github.com/kaspanet/kaspad/faucet/config"
"github.com/kaspanet/kaspad/faucet/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/base58"
"github.com/pkg/errors"
"os"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// faucetAddress is the P2PKH address derived from faucetPrivateKey.
	faucetAddress util.Address
	// faucetPrivateKey signs the faucet's outgoing transactions.
	faucetPrivateKey *ecc.PrivateKey
	// faucetScriptPubKey pays to faucetAddress; used for change outputs.
	faucetScriptPubKey []byte
)
// main parses configuration, optionally runs database migrations, derives the
// faucet's key material, and serves the HTTP API until interrupted.
func main() {
	defer panics.HandlePanic(log, nil, nil)
	err := config.Parse()
	if err != nil {
		// BUG FIX: the error text was previously passed to fmt.Fprintf as the
		// format string, so any '%' in it would be misinterpreted (go vet
		// printf). Print it as a value instead.
		wrappedErr := errors.Wrap(err, "Error parsing command-line arguments")
		_, err = fmt.Fprintln(os.Stderr, wrappedErr)
		if err != nil {
			panic(err)
		}
		return
	}
	cfg, err := config.MainConfig()
	if err != nil {
		panic(err)
	}
	// --migrate only migrates the database schema and exits.
	if cfg.Migrate {
		err := database.Migrate()
		if err != nil {
			panic(errors.Errorf("Error migrating database: %s", err))
		}
		return
	}
	err = database.Connect()
	if err != nil {
		panic(errors.Errorf("Error connecting to database: %s", err))
	}
	defer func() {
		err := database.Close()
		if err != nil {
			panic(errors.Errorf("Error closing the database: %s", err))
		}
	}()
	// Derive the faucet's address and scriptPubKey from the configured key.
	privateKeyBytes := base58.Decode(cfg.PrivateKey)
	faucetPrivateKey, _ = ecc.PrivKeyFromBytes(ecc.S256(), privateKeyBytes)
	faucetAddress, err = privateKeyToP2PKHAddress(faucetPrivateKey, config.ActiveNetParams())
	if err != nil {
		panic(errors.Errorf("Failed to get P2PKH address from private key: %s", err))
	}
	faucetScriptPubKey, err = txscript.PayToAddrScript(faucetAddress)
	if err != nil {
		panic(errors.Errorf("failed to generate faucetScriptPubKey to address: %s", err))
	}
	shutdownServer := startHTTPServer(cfg.HTTPListen)
	defer shutdownServer()
	// Block until an interrupt signal arrives.
	interrupt := signal.InterruptListener()
	<-interrupt
}
// privateKeyToP2PKHAddress generates a p2pkh address from a private key,
// using the compressed serialization of its public key.
func privateKeyToP2PKHAddress(key *ecc.PrivateKey, net *dagconfig.Params) (util.Address, error) {
	return util.NewAddressPubKeyHashFromPublicKey(key.PubKey().SerializeCompressed(), net.Prefix)
}

View File

@@ -1 +0,0 @@
-- Down migration: remove the faucet's IP rate-limiting table.
DROP TABLE `ip_uses`;

View File

@@ -1,6 +0,0 @@
-- Up migration: per-IP last-use timestamps, used to rate-limit faucet requests.
-- VARCHAR(39) fits a full textual IPv6 address.
CREATE TABLE `ip_uses`
(
    `ip`       VARCHAR(39) NOT NULL,
    `last_use` DATETIME    NOT NULL,
    PRIMARY KEY (`ip`)
);

View File

@@ -1,81 +0,0 @@
package main
import (
"context"
"encoding/json"
"github.com/kaspanet/kaspad/faucet/config"
"github.com/kaspanet/kaspad/httpserverutils"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"net/http"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
// gracefulShutdownTimeout bounds how long we wait for in-flight requests to
// finish when shutting the HTTP server down.
const gracefulShutdownTimeout = 30 * time.Second

// startHTTPServer starts the HTTP REST server and returns a
// function to gracefully shutdown it.
func startHTTPServer(listenAddr string) func() {
	router := mux.NewRouter()
	router.Use(httpserverutils.AddRequestMetadataMiddleware)
	router.Use(httpserverutils.RecoveryMiddleware)
	router.Use(httpserverutils.LoggingMiddleware)
	router.Use(httpserverutils.SetJSONMiddleware)
	router.HandleFunc(
		"/request_money",
		httpserverutils.MakeHandler(requestMoneyHandler)).
		Methods("POST")
	httpServer := &http.Server{
		Addr:    listenAddr,
		Handler: handlers.CORS()(router),
	}
	spawn(func() {
		// ListenAndServe always returns a non-nil error on exit.
		log.Errorf("%s", httpServer.ListenAndServe())
	})
	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
		defer cancel()
		err := httpServer.Shutdown(ctx)
		if err != nil {
			log.Errorf("Error shutting down HTTP server: %s", err)
		}
	}
}
// requestMoneyData is the JSON request body of the /request_money endpoint.
type requestMoneyData struct {
	Address string `json:"address"`
}
// requestMoneyHandler handles POST /request_money: it rate-limits by IP,
// decodes the requested address, sends coins to it, records the IP's use,
// and returns the resulting transaction ID.
func requestMoneyHandler(_ *httpserverutils.ServerContext, r *http.Request, _ map[string]string, _ map[string]string,
	requestBody []byte) (interface{}, error) {
	hErr := validateIPUsage(r)
	if hErr != nil {
		return nil, hErr
	}
	requestData := &requestMoneyData{}
	err := json.Unmarshal(requestBody, requestData)
	if err != nil {
		return nil, httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error unmarshalling request body"),
			"The request body is not json-formatted")
	}
	address, err := util.DecodeAddress(requestData.Address, config.ActiveNetParams().Prefix)
	if err != nil {
		return nil, httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error decoding address"),
			"Error decoding address")
	}
	tx, err := sendToAddress(address)
	if err != nil {
		return nil, err
	}
	// Record the use only after the send succeeded, so a failed send does not
	// consume the caller's daily quota.
	hErr = updateIPUsage(r)
	if hErr != nil {
		return nil, hErr
	}
	return tx.TxID().String(), nil
}

View File

@@ -1,7 +1,9 @@
package httpserverutils
import "github.com/kaspanet/kaspad/util/panics"
import "github.com/kaspanet/kaspad/kasparov/logger"
import (
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
log = logger.BackendLog.Logger("UTIL")

View File

@@ -1,69 +0,0 @@
package config
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/config"
"github.com/kaspanet/kaspad/kasparov/logger"
"github.com/pkg/errors"
"path/filepath"
)
var (
	// Default configuration options
	// defaultDBAddress is used when --dbaddress is not supplied.
	defaultDBAddress = "localhost:3306"
)
// KasparovFlags holds configuration common to both the Kasparov server and
// the Kasparov daemon: logging, database credentials, and the RPC connection
// to the full node. Validated and defaulted by ResolveKasparovFlags.
type KasparovFlags struct {
	LogDir      string `long:"logdir" description:"Directory to log output."`
	DebugLevel  string `short:"d" long:"debuglevel" description:"Set log level {trace, debug, info, warn, error, critical}"`
	DBAddress   string `long:"dbaddress" description:"Database address"`
	DBUser      string `long:"dbuser" description:"Database user" required:"true"`
	DBPassword  string `long:"dbpass" description:"Database password" required:"true"`
	DBName      string `long:"dbname" description:"Database name" required:"true"`
	RPCUser     string `short:"u" long:"rpcuser" description:"RPC username"`
	RPCPassword string `short:"P" long:"rpcpass" default-mask:"-" description:"RPC password"`
	RPCServer   string `short:"s" long:"rpcserver" description:"RPC server to connect to"`
	RPCCert     string `short:"c" long:"rpccert" description:"RPC server certificate chain for validation"`
	DisableTLS  bool   `long:"notls" description:"Disable TLS"`
	config.NetworkFlags
}
// ResolveKasparovFlags applies defaults, initializes logging, and validates
// the flag combination. It returns an error describing the first invalid or
// missing flag it encounters.
func (kasparovFlags *KasparovFlags) ResolveKasparovFlags(parser *flags.Parser,
	defaultLogDir, logFilename, errLogFilename string) error {
	if kasparovFlags.LogDir == "" {
		kasparovFlags.LogDir = defaultLogDir
	}
	logFile := filepath.Join(kasparovFlags.LogDir, logFilename)
	errLogFile := filepath.Join(kasparovFlags.LogDir, errLogFilename)
	logger.InitLog(logFile, errLogFile)
	if kasparovFlags.DebugLevel != "" {
		err := logger.SetLogLevels(kasparovFlags.DebugLevel)
		if err != nil {
			return err
		}
	}
	if kasparovFlags.DBAddress == "" {
		kasparovFlags.DBAddress = defaultDBAddress
	}
	if kasparovFlags.RPCUser == "" {
		return errors.New("--rpcuser is required")
	}
	if kasparovFlags.RPCPassword == "" {
		return errors.New("--rpcpass is required")
	}
	if kasparovFlags.RPCServer == "" {
		return errors.New("--rpcserver is required")
	}
	// BUG FIX: the two TLS validation messages were wrong — the first fired
	// when the cert was MISSING yet talked about "--cert used", and both
	// referred to a non-existent --cert flag instead of --rpccert.
	if kasparovFlags.RPCCert == "" && !kasparovFlags.DisableTLS {
		return errors.New("--rpccert is required when TLS is enabled (pass --notls to disable TLS)")
	}
	if kasparovFlags.RPCCert != "" && kasparovFlags.DisableTLS {
		return errors.New("--rpccert should be omitted if --notls is used")
	}
	return kasparovFlags.ResolveNetwork(parser)
}

View File

@@ -1,141 +0,0 @@
package database
import (
nativeerrors "errors"
"fmt"
"github.com/kaspanet/kaspad/kasparov/config"
"github.com/pkg/errors"
"os"
"github.com/golang-migrate/migrate/v4/source"
"github.com/jinzhu/gorm"
"github.com/golang-migrate/migrate/v4"
)
// db is the Kasparov database. Set by Connect, cleared by Close.
var db *gorm.DB

// DB returns a reference to the database connection.
// It fails if Connect has not been called successfully.
func DB() (*gorm.DB, error) {
	if db == nil {
		return nil, errors.New("Database is not connected")
	}
	return db, nil
}
// gormLogger adapts the package logger to gorm's logger interface, routing
// gorm's output to the error log.
type gormLogger struct{}

func (l gormLogger) Print(v ...interface{}) {
	str := fmt.Sprint(v...)
	log.Errorf(str)
}
// Connect connects to the database mentioned in
// config variable. It refuses to connect when the schema is not at the
// latest migration version, directing the operator to run with --migrate.
func Connect(cfg *config.KasparovFlags) error {
	connectionString := buildConnectionString(cfg)
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return errors.Errorf("Error checking whether the database is current: %s", err)
	}
	if !isCurrent {
		return errors.Errorf("Database is not current (version %d). Please migrate"+
			" the database by running the server with --migrate flag and then run it again.", version)
	}
	db, err = gorm.Open("mysql", connectionString)
	if err != nil {
		return err
	}
	db.SetLogger(gormLogger{})
	return nil
}
// Close closes the connection to the database, if one exists.
// It is safe to call when no connection was ever established.
func Close() error {
	if db == nil {
		return nil
	}
	closeErr := db.Close()
	db = nil
	return closeErr
}
// buildConnectionString assembles a MySQL DSN from the configured
// credentials, address, and database name.
func buildConnectionString(cfg *config.KasparovFlags) string {
	return fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True",
		cfg.DBUser, cfg.DBPassword, cfg.DBAddress, cfg.DBName)
}
// isCurrent resolves whether the database is on the latest
// version of the schema, returning that version alongside.
func isCurrent(migrator *migrate.Migrate, driver source.Driver) (bool, uint, error) {
	// Get the current version
	version, isDirty, err := migrator.Version()
	if nativeerrors.Is(err, migrate.ErrNilVersion) {
		// A pristine database has no version yet and therefore needs migration.
		return false, 0, nil
	}
	if err != nil {
		return false, 0, errors.WithStack(err)
	}
	if isDirty {
		return false, 0, errors.Errorf("Database is dirty")
	}
	// The database is current if Next returns ErrNotExist.
	// ROBUSTNESS FIX: previously this compared the unwrapped *os.PathError
	// field directly against os.ErrNotExist; os.IsNotExist matches both a
	// bare os.ErrNotExist and one wrapped in *os.PathError.
	_, err = driver.Next(version)
	if os.IsNotExist(err) {
		return true, version, nil
	}
	return false, version, err
}
// openMigrator opens the file-based migration source and a migrator bound to
// the given MySQL connection string.
func openMigrator(connectionString string) (*migrate.Migrate, source.Driver, error) {
	// NOTE(review): the migrations path is relative to the working directory;
	// presumably the binary is always launched from a sibling directory —
	// confirm.
	driver, err := source.Open("file://../database/migrations")
	if err != nil {
		return nil, nil, err
	}
	migrator, err := migrate.NewWithSourceInstance(
		"migrations", driver, "mysql://"+connectionString)
	if err != nil {
		return nil, nil, err
	}
	return migrator, driver, nil
}
// Migrate database to the latest version. It is a no-op when the schema is
// already current, and fails if the migration leaves the database dirty.
func Migrate(cfg *config.KasparovFlags) error {
	connectionString := buildConnectionString(cfg)
	migrator, driver, err := openMigrator(connectionString)
	if err != nil {
		return err
	}
	isCurrent, version, err := isCurrent(migrator, driver)
	if err != nil {
		return errors.Errorf("Error checking whether the database is current: %s", err)
	}
	if isCurrent {
		log.Infof("Database is already up-to-date (version %d)", version)
		return nil
	}
	err = migrator.Up()
	if err != nil {
		return err
	}
	// := here re-assigns version (already declared above) and declares isDirty.
	version, isDirty, err := migrator.Version()
	if err != nil {
		return err
	}
	if isDirty {
		return errors.Errorf("error migrating database: database is dirty")
	}
	log.Infof("Migrated database to the latest version (version %d)", version)
	return nil
}

View File

@@ -1,9 +0,0 @@
package database
import "github.com/kaspanet/kaspad/util/panics"
import "github.com/kaspanet/kaspad/kasparov/logger"
var (
	// log is the database package's logger instance.
	log = logger.Logger("DTBS")
	// spawn launches goroutines that report panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1 +0,0 @@
-- Down migration: remove the blocks table.
DROP TABLE `blocks`;

View File

@@ -1,23 +0,0 @@
-- Up migration: block headers plus DAG metadata. accepting_block_id is a
-- self-reference to the chain block that accepted this block (NULL until known).
CREATE TABLE `blocks`
(
    `id`                      BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `block_hash`              CHAR(64)        NOT NULL,
    `accepting_block_id`      BIGINT UNSIGNED NULL,
    `version`                 INT             NOT NULL,
    `hash_merkle_root`        CHAR(64)        NOT NULL,
    `accepted_id_merkle_root` CHAR(64)        NOT NULL,
    `utxo_commitment`         CHAR(64)        NOT NULL,
    `timestamp`               DATETIME        NOT NULL,
    `bits`                    INT UNSIGNED    NOT NULL,
    `nonce`                   BIGINT UNSIGNED NOT NULL,
    `blue_score`              BIGINT UNSIGNED NOT NULL,
    `is_chain_block`          TINYINT         NOT NULL,
    `mass`                    BIGINT          NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_blocks_block_hash` (`block_hash`),
    INDEX `idx_blocks_timestamp` (`timestamp`),
    INDEX `idx_blocks_is_chain_block` (`is_chain_block`),
    CONSTRAINT `fk_blocks_accepting_block_id`
        FOREIGN KEY (`accepting_block_id`)
            REFERENCES `blocks` (`id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the parent_blocks join table.
DROP TABLE `parent_blocks`;

View File

@@ -1,12 +0,0 @@
-- Up migration: many-to-many block parenthood (a DAG block has multiple parents).
CREATE TABLE `parent_blocks`
(
    `block_id`        BIGINT UNSIGNED NOT NULL,
    `parent_block_id` BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`block_id`, `parent_block_id`),
    CONSTRAINT `fk_parent_blocks_block_id`
        FOREIGN KEY (`block_id`)
            REFERENCES `blocks` (`id`),
    CONSTRAINT `fk_parent_blocks_parent_block_id`
        FOREIGN KEY (`parent_block_id`)
            REFERENCES `blocks` (`id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the raw_blocks table.
DROP TABLE `raw_blocks`;

View File

@@ -1,9 +0,0 @@
-- Up migration: serialized block bytes, one row per block.
CREATE TABLE `raw_blocks`
(
    `block_id`   BIGINT UNSIGNED NOT NULL,
    `block_data` MEDIUMBLOB      NOT NULL,
    PRIMARY KEY (`block_id`),
    CONSTRAINT `fk_raw_blocks_block_id`
        FOREIGN KEY (`block_id`)
            REFERENCES `blocks` (`id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the subnetworks table.
DROP TABLE `subnetworks`;

View File

@@ -1,8 +0,0 @@
-- Up migration: known subnetworks; gas_limit is NULL for subnetworks without one.
CREATE TABLE `subnetworks`
(
    `id`            BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `subnetwork_id` CHAR(64)        NOT NULL,
    `gas_limit`     BIGINT UNSIGNED NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_subnetworks_subnetwork_id` (`subnetwork_id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the transactions table.
DROP TABLE `transactions`;

View File

@@ -1,19 +0,0 @@
-- Up migration: transactions; accepting_block_id is NULL until the
-- transaction is accepted by a chain block.
CREATE TABLE `transactions`
(
    `id`                 BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `accepting_block_id` BIGINT UNSIGNED NULL,
    `transaction_hash`   CHAR(64)        NOT NULL,
    `transaction_id`     CHAR(64)        NOT NULL,
    `lock_time`          BIGINT UNSIGNED NOT NULL,
    `subnetwork_id`      BIGINT UNSIGNED NOT NULL,
    `gas`                BIGINT UNSIGNED NOT NULL,
    `payload_hash`       CHAR(64)        NOT NULL,
    `payload`            BLOB            NOT NULL,
    `mass`               BIGINT          NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_transactions_transaction_hash` (`transaction_hash`),
    INDEX `idx_transactions_transaction_id` (`transaction_id`),
    CONSTRAINT `fk_transactions_accepting_block_id`
        FOREIGN KEY (`accepting_block_id`)
            REFERENCES `blocks` (`id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the transactions_to_blocks join table.
DROP TABLE `transactions_to_blocks`;

View File

@@ -1,14 +0,0 @@
-- Up migration: many-to-many transaction/block membership; `index` is the
-- transaction's position within the block.
CREATE TABLE `transactions_to_blocks`
(
    `transaction_id` BIGINT UNSIGNED NOT NULL,
    `block_id`       BIGINT UNSIGNED NOT NULL,
    `index`          INT UNSIGNED    NOT NULL,
    PRIMARY KEY (`transaction_id`, `block_id`),
    INDEX `idx_transactions_to_blocks_index` (`index`),
    CONSTRAINT `fk_transactions_to_blocks_block_id`
        FOREIGN KEY (`block_id`)
            REFERENCES `blocks` (`id`),
    CONSTRAINT `fk_transactions_to_blocks_transaction_id`
        FOREIGN KEY (`transaction_id`)
            REFERENCES `transactions` (`id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the addresses table.
DROP TABLE `addresses`;

View File

@@ -1,7 +0,0 @@
-- Up migration: deduplicated addresses seen in transaction outputs.
-- CONSISTENCY FIX: added the trailing semicolon that every other migration
-- file in this set terminates its statement with.
CREATE TABLE `addresses`
(
    `id`      BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `address` CHAR(50)        NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE INDEX `idx_addresses_address` (`address`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the transaction_outputs table.
DROP TABLE `transaction_outputs`;

View File

@@ -1,18 +0,0 @@
-- Up migration: transaction outputs; is_spent tracks whether a later input
-- consumed the output.
CREATE TABLE `transaction_outputs`
(
    `id`             BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `transaction_id` BIGINT UNSIGNED NOT NULL,
    `index`          INT UNSIGNED    NOT NULL,
    `value`          BIGINT UNSIGNED NOT NULL,
    `script_pub_key` BLOB            NOT NULL,
    `is_spent`       TINYINT         NOT NULL,
    `address_id`     BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`id`),
    INDEX `idx_transaction_outputs_transaction_id` (`transaction_id`),
    CONSTRAINT `fk_transaction_outputs_transaction_id`
        FOREIGN KEY (`transaction_id`)
            REFERENCES `transactions` (`id`),
    CONSTRAINT `fk_transaction_outputs_address_id`
        FOREIGN KEY (`address_id`)
            REFERENCES `addresses` (`id`)
);

View File

@@ -1 +0,0 @@
-- Down migration: remove the transaction_inputs table.
DROP TABLE `transaction_inputs`;

View File

@@ -1,18 +0,0 @@
-- Up migration: transaction inputs, each referencing the output it spends.
CREATE TABLE `transaction_inputs`
(
    `id`                             BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
    `transaction_id`                 BIGINT UNSIGNED NULL,
    `previous_transaction_output_id` BIGINT UNSIGNED NOT NULL,
    `index`                          INT UNSIGNED    NOT NULL,
    `signature_script`               BLOB            NOT NULL,
    `sequence`                       BIGINT UNSIGNED NOT NULL,
    PRIMARY KEY (`id`),
    INDEX `idx_transaction_inputs_transaction_id` (`transaction_id`),
    INDEX `idx_transaction_inputs_previous_transaction_output_id` (`previous_transaction_output_id`),
    CONSTRAINT `fk_transaction_inputs_transaction_id`
        FOREIGN KEY (`transaction_id`)
            REFERENCES `transactions` (`id`),
    CONSTRAINT `fk_transaction_inputs_previous_transaction_output_id`
        FOREIGN KEY (`previous_transaction_output_id`)
            REFERENCES `transaction_outputs` (`id`)
);

View File

@@ -1,111 +0,0 @@
package dbmodels
import (
"time"
)
// Block is the gorm model for the 'blocks' table.
// AcceptingBlockID/AcceptingBlock are nil until the accepting chain block is
// known; ParentBlocks maps the DAG's multi-parent relation.
type Block struct {
	ID                   uint64 `gorm:"primary_key"`
	BlockHash            string
	AcceptingBlockID     *uint64
	AcceptingBlock       *Block
	Version              int32
	HashMerkleRoot       string
	AcceptedIDMerkleRoot string
	UTXOCommitment       string
	Timestamp            time.Time
	Bits                 uint32
	Nonce                uint64
	BlueScore            uint64
	IsChainBlock         bool
	Mass                 uint64
	ParentBlocks         []Block `gorm:"many2many:parent_blocks;"`
}
// ParentBlock is the gorm model for the 'parent_blocks' table
// (the block/parent-block join rows).
type ParentBlock struct {
	BlockID       uint64
	Block         Block
	ParentBlockID uint64
	ParentBlock   Block
}
// RawBlock is the gorm model for the 'raw_blocks' table; BlockData holds the
// block's serialized bytes.
type RawBlock struct {
	BlockID   uint64
	Block     Block
	BlockData []byte
}
// Subnetwork is the gorm model for the 'subnetworks' table; GasLimit is nil
// for subnetworks without one.
type Subnetwork struct {
	ID           uint64 `gorm:"primary_key"`
	SubnetworkID string
	GasLimit     *uint64
}
// Transaction is the gorm model for the 'transactions' table.
// AcceptingBlockID/AcceptingBlock are nil until acceptance; Blocks maps the
// many-to-many membership via 'transactions_to_blocks'.
type Transaction struct {
	ID                 uint64 `gorm:"primary_key"`
	AcceptingBlockID   *uint64
	AcceptingBlock     *Block
	TransactionHash    string
	TransactionID      string
	LockTime           uint64
	SubnetworkID       uint64
	Subnetwork         Subnetwork
	Gas                uint64
	PayloadHash        string
	Payload            []byte
	Mass               uint64
	Blocks             []Block `gorm:"many2many:transactions_to_blocks;"`
	TransactionOutputs []TransactionOutput
	TransactionInputs  []TransactionInput
}
// TransactionBlock is the gorm model for the 'transactions_to_blocks' table;
// Index is the transaction's position within the block.
type TransactionBlock struct {
	TransactionID uint64
	Transaction   Transaction
	BlockID       uint64
	Block         Block
	Index         uint32
}
// TableName returns the table name associated to the
// TransactionBlock gorm model (overriding the name gorm would otherwise
// derive from the struct name).
func (TransactionBlock) TableName() string {
	return "transactions_to_blocks"
}
// TransactionOutput is the gorm model for the 'transaction_outputs' table;
// IsSpent tracks whether a later input consumed this output.
type TransactionOutput struct {
	ID            uint64 `gorm:"primary_key"`
	TransactionID uint64
	Transaction   Transaction
	Index         uint32
	Value         uint64
	ScriptPubKey  []byte
	IsSpent       bool
	AddressID     uint64
	Address       Address
}
// TransactionInput is the gorm model for the 'transaction_inputs' table;
// each input references the output it spends.
type TransactionInput struct {
	ID                          uint64 `gorm:"primary_key"`
	TransactionID               uint64
	Transaction                 Transaction
	PreviousTransactionOutputID uint64
	PreviousTransactionOutput   TransactionOutput
	Index                       uint32
	SignatureScript             []byte
	Sequence                    uint64
}
// Address is the gorm model for the 'addresses' table (deduplicated
// addresses seen in transaction outputs).
type Address struct {
	ID      uint64 `gorm:"primary_key"`
	Address string
}

View File

@@ -1,124 +0,0 @@
package jsonrpc
import (
"github.com/kaspanet/kaspad/kasparov/config"
"github.com/pkg/errors"
"io/ioutil"
"time"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
)
// Client represents a connection to the JSON-RPC API of a full node.
// The channels deliver block-added and chain-changed notifications pushed
// by the node.
type Client struct {
	*rpcclient.Client
	OnBlockAdded   chan *BlockAddedMsg
	OnChainChanged chan *ChainChangedMsg
}
// client is the package-level singleton connection; set by Connect,
// cleared by Close.
var client *Client

// GetClient returns an instance of the JSON-RPC client, in case we have an active connection
func GetClient() (*Client, error) {
	if client == nil {
		return nil, errors.New("JSON-RPC is not connected")
	}
	return client, nil
}
// BlockAddedMsg defines the message received in onBlockAdded.
// Note it carries only the chain height and the header, not the block's
// transactions.
type BlockAddedMsg struct {
	ChainHeight uint64
	Header      *wire.BlockHeader
}
// ChainChangedMsg defines the message received in onChainChanged:
// the chain blocks removed from and added to the selected parent chain.
type ChainChangedMsg struct {
	RemovedChainBlockHashes []*daghash.Hash
	AddedChainBlocks        []*rpcclient.ChainBlock
}
// Close closes the connection to the JSON-RPC API server and clears the
// singleton. Calling it when not connected is a no-op.
func Close() {
	if client == nil {
		return
	}
	client.Disconnect()
	client = nil
}
// Connect initiates a connection to the JSON-RPC API Server, reading the TLS
// certificate from disk unless TLS is disabled, and stores the resulting
// client in the package-level singleton.
func Connect(cfg *config.KasparovFlags) error {
	var cert []byte
	if !cfg.DisableTLS {
		var err error
		cert, err = ioutil.ReadFile(cfg.RPCCert)
		if err != nil {
			return errors.Errorf("Error reading certificates file: %s", err)
		}
	}
	connCfg := &rpcclient.ConnConfig{
		Host:           cfg.RPCServer,
		Endpoint:       "ws",
		User:           cfg.RPCUser,
		Pass:           cfg.RPCPassword,
		DisableTLS:     cfg.DisableTLS,
		RequestTimeout: time.Second * 60,
	}
	if !cfg.DisableTLS {
		connCfg.Certificates = cert
	}
	var err error
	client, err = newClient(connCfg)
	if err != nil {
		return errors.Errorf("Error connecting to address %s: %s", cfg.RPCServer, err)
	}
	return nil
}
// newClient dials the node, wires the notification callbacks into the
// client's channels, and subscribes to block and chain-change notifications.
func newClient(connCfg *rpcclient.ConnConfig) (*Client, error) {
	client = &Client{
		OnBlockAdded:   make(chan *BlockAddedMsg),
		OnChainChanged: make(chan *ChainChangedMsg),
	}
	notificationHandlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
			txs []*util.Tx) {
			// NOTE(review): txs is dropped here — BlockAddedMsg carries only
			// height and header. Presumably consumers fetch transactions
			// separately; confirm this is intentional.
			client.OnBlockAdded <- &BlockAddedMsg{
				ChainHeight: height,
				Header:      header,
			}
		},
		OnChainChanged: func(removedChainBlockHashes []*daghash.Hash,
			addedChainBlocks []*rpcclient.ChainBlock) {
			client.OnChainChanged <- &ChainChangedMsg{
				RemovedChainBlockHashes: removedChainBlockHashes,
				AddedChainBlocks:        addedChainBlocks,
			}
		},
	}
	var err error
	client.Client, err = rpcclient.New(connCfg, notificationHandlers)
	if err != nil {
		return nil, errors.Errorf("Error connecting to address %s: %s", connCfg.Host, err)
	}
	if err = client.NotifyBlocks(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
	}
	if err = client.NotifyChainChanges(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for chain changes notifications: %s", client.Host(), err)
	}
	return client, nil
}

View File

@@ -1,16 +0,0 @@
package jsonrpc
import (
"github.com/kaspanet/kaspad/kasparov/logger"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// log is the jsonrpc package's logger instance.
	log = logger.BackendLog.Logger("RPCC")
	// spawn launches goroutines that report panics through log.
	spawn = panics.GoroutineWrapperFunc(log)
)

// init routes the rpcclient library's internal logging through this
// package's logger.
func init() {
	rpcclient.UseLogger(log)
}

View File

@@ -1,6 +0,0 @@
package apimodels
// RawTransaction is a json representation of a raw transaction
// (its hex-encoded serialized bytes).
type RawTransaction struct {
	RawTransaction string `json:"rawTransaction"`
}

View File

@@ -1,64 +0,0 @@
package apimodels
// TransactionResponse is a json representation of a transaction.
// Pointer fields are omitted from the JSON when nil.
type TransactionResponse struct {
	TransactionHash         string                       `json:"transactionHash"`
	TransactionID           string                       `json:"transactionId"`
	AcceptingBlockHash      *string                      `json:"acceptingBlockHash,omitempty"`
	AcceptingBlockBlueScore *uint64                      `json:"acceptingBlockBlueScore,omitempty"`
	SubnetworkID            string                       `json:"subnetworkId"`
	LockTime                uint64                       `json:"lockTime"`
	Gas                     uint64                       `json:"gas,omitempty"`
	PayloadHash             string                       `json:"payloadHash,omitempty"`
	Payload                 string                       `json:"payload,omitempty"`
	Inputs                  []*TransactionInputResponse  `json:"inputs"`
	Outputs                 []*TransactionOutputResponse `json:"outputs"`
	Mass                    uint64                       `json:"mass"`
}
// TransactionOutputResponse is a json representation of a transaction output.
// Pointer fields are optional and omitted from the JSON when nil.
type TransactionOutputResponse struct {
	TransactionID           string  `json:"transactionId,omitempty"`
	Value                   uint64  `json:"value"`
	ScriptPubKey            string  `json:"scriptPubKey"`
	Address                 string  `json:"address,omitempty"`
	AcceptingBlockHash      *string `json:"acceptingBlockHash,omitempty"`
	AcceptingBlockBlueScore uint64  `json:"acceptingBlockBlueScore,omitempty"`
	Index                   uint32  `json:"index"`
	IsCoinbase              *bool   `json:"isCoinbase,omitempty"`
	IsSpendable             *bool   `json:"isSpendable,omitempty"`
	Confirmations           *uint64 `json:"confirmations,omitempty"`
}
// TransactionInputResponse is a json representation of a transaction input,
// referencing the output it spends by transaction ID and index.
type TransactionInputResponse struct {
	TransactionID                  string `json:"transactionId,omitempty"`
	PreviousTransactionID          string `json:"previousTransactionId"`
	PreviousTransactionOutputIndex uint32 `json:"previousTransactionOutputIndex"`
	SignatureScript                string `json:"signatureScript"`
	Sequence                       uint64 `json:"sequence"`
	Address                        string `json:"address"`
}
// BlockResponse is a json representation of a block's header fields plus
// DAG metadata; AcceptingBlockHash is null until the block is accepted.
type BlockResponse struct {
	BlockHash            string  `json:"blockHash"`
	Version              int32   `json:"version"`
	HashMerkleRoot       string  `json:"hashMerkleRoot"`
	AcceptedIDMerkleRoot string  `json:"acceptedIDMerkleRoot"`
	UTXOCommitment       string  `json:"utxoCommitment"`
	Timestamp            uint64  `json:"timestamp"`
	Bits                 uint32  `json:"bits"`
	Nonce                uint64  `json:"nonce"`
	AcceptingBlockHash   *string `json:"acceptingBlockHash"`
	BlueScore            uint64  `json:"blueScore"`
	IsChainBlock         bool    `json:"isChainBlock"`
	Mass                 uint64  `json:"mass"`
}
// FeeEstimateResponse is a json representation of a fee estimate
// for the three supported transaction priorities.
type FeeEstimateResponse struct {
	HighPriority   float64 `json:"highPriority"`
	NormalPriority float64 `json:"normalPriority"`
	LowPriority    float64 `json:"lowPriority"`
}

View File

@@ -1,49 +0,0 @@
package config
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/kasparov/config"
"github.com/kaspanet/kaspad/util"
)
const (
logFilename = "kasparovd.log"
errLogFilename = "kasparovd_err.log"
)
var (
// Default configuration options
defaultLogDir = util.AppDataDir("kasparovd", false)
defaultHTTPListen = "0.0.0.0:8080"
activeConfig *Config
)
// ActiveConfig returns the active configuration struct
// (populated by Parse; nil until Parse has been called).
func ActiveConfig() *Config {
	return activeConfig
}
// Config defines the configuration options for the API server.
type Config struct {
	HTTPListen string `long:"listen" description:"HTTP address to listen on (default: 0.0.0.0:8080)"`
	// KasparovFlags holds the flags shared by all Kasparov executables
	// (database, RPC and network options).
	config.KasparovFlags
}
// Parse parses the CLI arguments and returns a config struct.
func Parse() error {
activeConfig = &Config{
HTTPListen: defaultHTTPListen,
}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return err
}
err = activeConfig.ResolveKasparovFlags(parser, defaultLogDir, logFilename, errLogFilename)
if err != nil {
return err
}
return nil
}

View File

@@ -1,110 +0,0 @@
package controllers
import (
"encoding/hex"
"net/http"
"github.com/kaspanet/kaspad/kasparov/database"
"github.com/kaspanet/kaspad/kasparov/dbmodels"
"github.com/kaspanet/kaspad/kasparov/kasparovd/apimodels"
"github.com/kaspanet/kaspad/httpserverutils"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/util/daghash"
)
const (
	// OrderAscending is parameter that can be used
	// in a get list handler to get a list ordered
	// in an ascending order.
	OrderAscending = "asc"

	// OrderDescending is parameter that can be used
	// in a get list handler to get a list ordered
	// in a descending order.
	OrderDescending = "desc"
)

// maxGetBlocksLimit is the maximum number of blocks that may be requested
// in a single GetBlocksHandler call.
const maxGetBlocksLimit = 100
// GetBlockByHashHandler returns a block by a given hash.
// Returns 422 for a malformed hash and 404 when no such block exists.
func GetBlockByHashHandler(blockHash string) (interface{}, error) {
	// Validate that the hash is a well-formed hex string of the right
	// length before touching the database.
	if bytes, err := hex.DecodeString(blockHash); err != nil || len(bytes) != daghash.HashSize {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The given block hash is not a hex-encoded %d-byte hash.", daghash.HashSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	block := &dbmodels.Block{}
	dbResult := db.Where(&dbmodels.Block{BlockHash: blockHash}).Preload("AcceptingBlock").First(block)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
		return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No block with the given block hash was found"))
	}
	if httpserverutils.HasDBError(dbErrors) {
		// Fix: report that a block failed to load (the message previously
		// said "transactions"), and reuse dbErrors instead of calling
		// GetErrors() a second time.
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading the block from the database:",
			dbErrors)
	}
	return convertBlockModelToBlockResponse(block), nil
}
// GetBlocksHandler searches for all blocks, paginated by skip/limit and
// ordered by database ID in the requested direction.
func GetBlocksHandler(order string, skip uint64, limit uint64) (interface{}, error) {
	if limit < 1 || limit > maxGetBlocksLimit {
		// Bug fix: the message previously interpolated
		// maxGetTransactionsLimit, reporting the wrong bound to the client.
		return nil, httpserverutils.NewHandlerError(http.StatusBadRequest,
			errors.Errorf("Limit higher than %d or lower than 1 was requested.", maxGetBlocksLimit))
	}
	blocks := []*dbmodels.Block{}
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	query := db.
		Limit(limit).
		Offset(skip).
		Preload("AcceptingBlock")
	// Validate and apply the requested ordering.
	switch order {
	case OrderAscending:
		query = query.Order("`id` ASC")
	case OrderDescending:
		query = query.Order("`id` DESC")
	default:
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("'%s' is not a valid order", order))
	}
	query.Find(&blocks)
	blockResponses := make([]*apimodels.BlockResponse, len(blocks))
	for i, block := range blocks {
		blockResponses[i] = convertBlockModelToBlockResponse(block)
	}
	return blockResponses, nil
}
// GetAcceptedTransactionIDsByBlockHashHandler returns an array of transaction IDs for a given block hash
func GetAcceptedTransactionIDsByBlockHashHandler(blockHash string) ([]string, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	var transactions []dbmodels.Transaction
	dbResult := db.
		Joins("LEFT JOIN `blocks` ON `blocks`.`id` = `transactions`.`accepting_block_id`").
		Where("`blocks`.`block_hash` = ?", blockHash).
		Find(&transactions)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Failed to find transactions: ", dbErrors)
	}

	// Bug fix: the previous version did make([]string, len(transactions))
	// and then appended, producing a slice of twice the length whose first
	// half was empty strings. Pre-allocate capacity only and append.
	result := make([]string, 0, len(transactions))
	for _, transaction := range transactions {
		result = append(result, transaction.TransactionID)
	}
	return result, nil
}

View File

@@ -1,66 +0,0 @@
package controllers
import (
"encoding/hex"
"github.com/kaspanet/kaspad/kasparov/dbmodels"
"github.com/kaspanet/kaspad/kasparov/kasparovd/apimodels"
"github.com/kaspanet/kaspad/rpcmodel"
)
// convertTxDBModelToTxResponse builds an API transaction response from a
// transaction database model, including its inputs and outputs.
func convertTxDBModelToTxResponse(tx *dbmodels.Transaction) *apimodels.TransactionResponse {
	inputs := make([]*apimodels.TransactionInputResponse, len(tx.TransactionInputs))
	for i, in := range tx.TransactionInputs {
		inputs[i] = &apimodels.TransactionInputResponse{
			PreviousTransactionID:          in.PreviousTransactionOutput.Transaction.TransactionID,
			PreviousTransactionOutputIndex: in.PreviousTransactionOutput.Index,
			SignatureScript:                hex.EncodeToString(in.SignatureScript),
			Sequence:                       in.Sequence,
			Address:                        in.PreviousTransactionOutput.Address.Address,
		}
	}

	outputs := make([]*apimodels.TransactionOutputResponse, len(tx.TransactionOutputs))
	for i, out := range tx.TransactionOutputs {
		outputs[i] = &apimodels.TransactionOutputResponse{
			Value:        out.Value,
			ScriptPubKey: hex.EncodeToString(out.ScriptPubKey),
			Address:      out.Address.Address,
			Index:        out.Index,
		}
	}

	response := &apimodels.TransactionResponse{
		TransactionHash: tx.TransactionHash,
		TransactionID:   tx.TransactionID,
		SubnetworkID:    tx.Subnetwork.SubnetworkID,
		LockTime:        tx.LockTime,
		Gas:             tx.Gas,
		PayloadHash:     tx.PayloadHash,
		Payload:         hex.EncodeToString(tx.Payload),
		Inputs:          inputs,
		Outputs:         outputs,
		Mass:            tx.Mass,
	}
	// The accepting block is optional: a transaction that has no accepting
	// block gets no accepting-block fields in its response.
	if tx.AcceptingBlock != nil {
		response.AcceptingBlockHash = &tx.AcceptingBlock.BlockHash
		response.AcceptingBlockBlueScore = &tx.AcceptingBlock.BlueScore
	}
	return response
}
// convertBlockModelToBlockResponse builds an API block response from a
// block database model.
func convertBlockModelToBlockResponse(block *dbmodels.Block) *apimodels.BlockResponse {
	// The accepting block hash is optional; it is filled in below only when
	// an accepting block is attached to the model.
	var acceptingBlockHash *string
	if block.AcceptingBlock != nil {
		acceptingBlockHash = rpcmodel.String(block.AcceptingBlock.BlockHash)
	}
	return &apimodels.BlockResponse{
		BlockHash:            block.BlockHash,
		Version:              block.Version,
		HashMerkleRoot:       block.HashMerkleRoot,
		AcceptedIDMerkleRoot: block.AcceptedIDMerkleRoot,
		UTXOCommitment:       block.UTXOCommitment,
		Timestamp:            uint64(block.Timestamp.Unix()),
		Bits:                 block.Bits,
		Nonce:                block.Nonce,
		AcceptingBlockHash:   acceptingBlockHash,
		BlueScore:            block.BlueScore,
		IsChainBlock:         block.IsChainBlock,
		Mass:                 block.Mass,
	}
}

View File

@@ -1,13 +0,0 @@
package controllers
import "github.com/kaspanet/kaspad/kasparov/kasparovd/apimodels"
// GetFeeEstimatesHandler returns the fee estimates for different priorities
// for accepting a transaction in the DAG.
func GetFeeEstimatesHandler() (interface{}, error) {
	// The estimates are currently hard-coded constants.
	estimates := &apimodels.FeeEstimateResponse{
		HighPriority:   3,
		NormalPriority: 2,
		LowPriority:    1,
	}
	return estimates, nil
}

View File

@@ -1,314 +0,0 @@
package controllers
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"github.com/kaspanet/kaspad/kasparov/database"
"github.com/kaspanet/kaspad/kasparov/dbmodels"
"github.com/kaspanet/kaspad/kasparov/jsonrpc"
"github.com/kaspanet/kaspad/kasparov/kasparovd/apimodels"
"github.com/kaspanet/kaspad/kasparov/kasparovd/config"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/httpserverutils"
"github.com/kaspanet/kaspad/util/subnetworkid"
"github.com/pkg/errors"
"github.com/jinzhu/gorm"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// maxGetTransactionsLimit is the maximum number of transactions that may be
// requested in a single get-transactions call.
const maxGetTransactionsLimit = 1000
// GetTransactionByIDHandler returns a transaction by a given transaction ID.
// Returns 422 for a malformed ID and 404 when no such transaction exists.
func GetTransactionByIDHandler(txID string) (interface{}, error) {
	// Validate the ID before touching the database.
	if bytes, err := hex.DecodeString(txID); err != nil || len(bytes) != daghash.TxIDSize {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The given txid is not a hex-encoded %d-byte hash.", daghash.TxIDSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	tx := &dbmodels.Transaction{}
	query := db.Where(&dbmodels.Transaction{TransactionID: txID})
	// Fix: tx is already a pointer; pass it directly instead of &tx
	// (a **Transaction), consistent with GetBlockByHashHandler.
	dbResult := addTxPreloadedFields(query).First(tx)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
		return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No transaction with the given txid was found"))
	}
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
	}
	return convertTxDBModelToTxResponse(tx), nil
}
// GetTransactionByHashHandler returns a transaction by a given transaction hash.
// Returns 422 for a malformed hash and 404 when no such transaction exists.
func GetTransactionByHashHandler(txHash string) (interface{}, error) {
	// Validate the hash before touching the database.
	if bytes, err := hex.DecodeString(txHash); err != nil || len(bytes) != daghash.HashSize {
		return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity,
			errors.Errorf("The given txhash is not a hex-encoded %d-byte hash.", daghash.HashSize))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	tx := &dbmodels.Transaction{}
	query := db.Where(&dbmodels.Transaction{TransactionHash: txHash})
	// Fix: tx is already a pointer; pass it directly instead of &tx
	// (a **Transaction), consistent with GetBlockByHashHandler.
	dbResult := addTxPreloadedFields(query).First(tx)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.IsDBRecordNotFoundError(dbErrors) {
		// errors.New instead of errors.Errorf: the message has no format verbs.
		return nil, httpserverutils.NewHandlerError(http.StatusNotFound, errors.New("No transaction with the given txhash was found."))
	}
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction from the database:", dbErrors)
	}
	return convertTxDBModelToTxResponse(tx), nil
}
// GetTransactionsByAddressHandler searches for all transactions
// where the given address is either an input or an output.
func GetTransactionsByAddressHandler(address string, skip uint64, limit uint64) (interface{}, error) {
	if limit < 1 || limit > maxGetTransactionsLimit {
		return nil, httpserverutils.NewHandlerError(http.StatusBadRequest,
			errors.Errorf("Limit higher than %d or lower than 1 was requested.", maxGetTransactionsLimit))
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	// Match transactions whose outputs pay to the address, or whose inputs
	// spend outputs that paid to it.
	query := joinTxInputsTxOutputsAndAddresses(db).
		Where("`out_addresses`.`address` = ?", address).
		Or("`in_addresses`.`address` = ?", address).
		Limit(limit).
		Offset(skip).
		Order("`transactions`.`id` ASC")

	transactions := []*dbmodels.Transaction{}
	dbErrors := addTxPreloadedFields(query).Find(&transactions).GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
	}

	responses := make([]*apimodels.TransactionResponse, len(transactions))
	for i, transaction := range transactions {
		responses[i] = convertTxDBModelToTxResponse(transaction)
	}
	return responses, nil
}
// fetchSelectedTip returns the chain block with the highest blue score,
// i.e. the current selected tip as recorded in the database.
func fetchSelectedTip() (*dbmodels.Block, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	block := &dbmodels.Block{}
	dbResult := db.Order("blue_score DESC").
		Where(&dbmodels.Block{IsChainBlock: true}).
		First(block)
	dbErrors := dbResult.GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		// Fix: the message previously said "transactions" although this
		// function loads the selected-tip block.
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading the selected tip from the database:", dbErrors)
	}
	return block, nil
}
// areTxsInBlock returns, for the given transaction database IDs, a set of
// the IDs that are included in the block with the given database ID.
func areTxsInBlock(blockID uint64, txIDs []uint64) (map[uint64]bool, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}
	transactionBlocks := []*dbmodels.TransactionBlock{}
	dbErrors := db.
		Where(&dbmodels.TransactionBlock{BlockID: blockID}).
		Where("transaction_id in (?)", txIDs).
		Find(&transactionBlocks).GetErrors()
	if len(dbErrors) > 0 {
		// Fix: the message previously said "UTXOs" although this function
		// loads transaction-block relations.
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transaction blocks from the database:", dbErrors)
	}

	isInBlock := make(map[uint64]bool, len(transactionBlocks))
	for _, transactionBlock := range transactionBlocks {
		isInBlock[transactionBlock.TransactionID] = true
	}
	return isInBlock, nil
}
// GetUTXOsByAddressHandler searches for all UTXOs that belong to a certain address.
func GetUTXOsByAddressHandler(address string) (interface{}, error) {
	_, err := util.DecodeAddress(address, config.ActiveConfig().ActiveNetParams.Prefix)
	if err != nil {
		return nil, httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "error decoding address"),
			"The given address is not a well-formatted P2PKH or P2SH address.")
	}

	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	var transactionOutputs []*dbmodels.TransactionOutput
	dbErrors := db.
		Joins("LEFT JOIN `addresses` ON `addresses`.`id` = `transaction_outputs`.`address_id`").
		Where("`addresses`.`address` = ? AND `transaction_outputs`.`is_spent` = 0", address).
		Preload("Transaction.AcceptingBlock").
		Preload("Transaction.Subnetwork").
		Find(&transactionOutputs).GetErrors()
	if len(dbErrors) > 0 {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading UTXOs from the database:", dbErrors)
	}

	// Bug fix: the previous version pre-sized this slice with
	// make([]uint64, len(transactionOutputs)) and index-assigned only the
	// non-accepted entries, which (a) left zero-valued IDs in the IN query
	// and (b) made the len != 0 check below true whenever any outputs
	// existed. Collect only the actually non-accepted transaction IDs.
	nonAcceptedTxIds := make([]uint64, 0, len(transactionOutputs))
	for _, txOut := range transactionOutputs {
		if txOut.Transaction.AcceptingBlock == nil {
			nonAcceptedTxIds = append(nonAcceptedTxIds, txOut.TransactionID)
		}
	}

	var selectedTip *dbmodels.Block
	var isTxInSelectedTip map[uint64]bool
	if len(nonAcceptedTxIds) != 0 {
		selectedTip, err = fetchSelectedTip()
		if err != nil {
			return nil, err
		}
		isTxInSelectedTip, err = areTxsInBlock(selectedTip.ID, nonAcceptedTxIds)
		if err != nil {
			return nil, err
		}
	}

	activeNetParams := config.ActiveConfig().NetParams()

	UTXOsResponses := make([]*apimodels.TransactionOutputResponse, len(transactionOutputs))
	for i, transactionOutput := range transactionOutputs {
		subnetworkID := &subnetworkid.SubnetworkID{}
		err := subnetworkid.Decode(subnetworkID, transactionOutput.Transaction.Subnetwork.SubnetworkID)
		if err != nil {
			return nil, errors.Wrap(err, fmt.Sprintf("Couldn't decode subnetwork id %s", transactionOutput.Transaction.Subnetwork.SubnetworkID))
		}
		var acceptingBlockHash *string
		var confirmations uint64
		acceptingBlockBlueScore := blockdag.UnacceptedBlueScore
		// Bug fix: isTxInSelectedTip is keyed by transaction ID (see
		// areTxsInBlock), but was previously looked up with the output's
		// own database ID.
		if isTxInSelectedTip[transactionOutput.TransactionID] {
			confirmations = 1
		} else if transactionOutput.Transaction.AcceptingBlock != nil {
			acceptingBlockHash = rpcmodel.String(transactionOutput.Transaction.AcceptingBlock.BlockHash)
			acceptingBlockBlueScore = transactionOutput.Transaction.AcceptingBlock.BlueScore
			confirmations = selectedTip.BlueScore - acceptingBlockBlueScore + 2
		}
		isCoinbase := subnetworkID.IsEqual(subnetworkid.SubnetworkIDCoinbase)
		UTXOsResponses[i] = &apimodels.TransactionOutputResponse{
			TransactionID:           transactionOutput.Transaction.TransactionID,
			Value:                   transactionOutput.Value,
			ScriptPubKey:            hex.EncodeToString(transactionOutput.ScriptPubKey),
			AcceptingBlockHash:      acceptingBlockHash,
			AcceptingBlockBlueScore: acceptingBlockBlueScore,
			Index:                   transactionOutput.Index,
			IsCoinbase:              rpcmodel.Bool(isCoinbase),
			Confirmations:           rpcmodel.Uint64(confirmations),
			IsSpendable:             rpcmodel.Bool(!isCoinbase || confirmations >= activeNetParams.BlockCoinbaseMaturity),
		}
	}
	return UTXOsResponses, nil
}
// joinTxInputsTxOutputsAndAddresses augments a transactions query with joins
// onto the transactions' outputs (and their addresses, aliased
// `out_addresses`) and inputs, including the previous outputs the inputs
// spend (whose addresses are aliased `in_addresses`).
func joinTxInputsTxOutputsAndAddresses(query *gorm.DB) *gorm.DB {
	return query.
		Joins("LEFT JOIN `transaction_outputs` ON `transaction_outputs`.`transaction_id` = `transactions`.`id`").
		Joins("LEFT JOIN `addresses` AS `out_addresses` ON `out_addresses`.`id` = `transaction_outputs`.`address_id`").
		Joins("LEFT JOIN `transaction_inputs` ON `transaction_inputs`.`transaction_id` = `transactions`.`id`").
		Joins("LEFT JOIN `transaction_outputs` AS `inputs_outs` ON `inputs_outs`.`id` = `transaction_inputs`.`previous_transaction_output_id`").
		Joins("LEFT JOIN `addresses` AS `in_addresses` ON `in_addresses`.`id` = `inputs_outs`.`address_id`")
}
// addTxPreloadedFields preloads all the associations needed to convert a
// transaction model into a full API response (accepting block, subnetwork,
// outputs with addresses, and inputs with their previous outputs).
func addTxPreloadedFields(query *gorm.DB) *gorm.DB {
	return query.Preload("AcceptingBlock").
		Preload("Subnetwork").
		Preload("TransactionOutputs").
		Preload("TransactionOutputs.Address").
		Preload("TransactionInputs.PreviousTransactionOutput.Transaction").
		Preload("TransactionInputs.PreviousTransactionOutput.Address")
}
// PostTransaction forwards a raw transaction to the JSON-RPC API server
func PostTransaction(requestBody []byte) error {
	client, err := jsonrpc.GetClient()
	if err != nil {
		return err
	}

	// Decode the JSON request body into a raw-transaction wrapper.
	rawTx := &apimodels.RawTransaction{}
	if err := json.Unmarshal(requestBody, rawTx); err != nil {
		return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error unmarshalling request body"),
			"The request body is not json-formatted")
	}

	// The transaction itself is hex-encoded inside the wrapper.
	txBytes, err := hex.DecodeString(rawTx.RawTransaction)
	if err != nil {
		return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error decoding hex raw transaction"),
			"The raw transaction is not a hex-encoded transaction")
	}

	// Deserialize the wire-format transaction.
	tx := &wire.MsgTx{}
	if err := tx.KaspaDecode(bytes.NewReader(txBytes), 0); err != nil {
		return httpserverutils.NewHandlerErrorWithCustomClientMessage(http.StatusUnprocessableEntity,
			errors.Wrap(err, "Error decoding raw transaction"),
			"Error decoding raw transaction")
	}

	if _, err := client.SendRawTransaction(tx, true); err != nil {
		// RPC-level rejections are surfaced as 422; anything else bubbles up.
		if rpcErr, ok := errors.Cause(err).(*rpcmodel.RPCError); ok {
			return httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, rpcErr)
		}
		return err
	}
	return nil
}
// GetTransactionsByIDsHandler finds transactions by the given transactionIds.
func GetTransactionsByIDsHandler(transactionIds []string) ([]*apimodels.TransactionResponse, error) {
	db, err := database.DB()
	if err != nil {
		return nil, err
	}

	query := joinTxInputsTxOutputsAndAddresses(db).
		Where("`transactions`.`transaction_id` IN (?)", transactionIds)

	var transactions []*dbmodels.Transaction
	dbErrors := addTxPreloadedFields(query).Find(&transactions).GetErrors()
	if httpserverutils.HasDBError(dbErrors) {
		return nil, httpserverutils.NewErrorFromDBErrors("Some errors were encountered when loading transactions from the database:", dbErrors)
	}

	responses := make([]*apimodels.TransactionResponse, len(transactions))
	for i, transaction := range transactions {
		responses[i] = convertTxDBModelToTxResponse(transaction)
	}
	return responses, nil
}

View File

@@ -1,29 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

# Create and enter the kaspad source directory inside GOPATH.
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad

RUN apk add --no-cache curl git

# Copy only the module files first so the dependency-download layer is
# cached independently of source changes.
COPY go.mod .
COPY go.sum .
RUN go mod download

COPY . .

# Build a statically-linked kasparovd binary (CGO disabled).
RUN cd kasparov/kasparovd && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kasparovd .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app

# tini runs as PID 1 to forward signals and reap zombie processes.
RUN apk add --no-cache tini

COPY --from=build /go/src/github.com/kaspanet/kaspad/kasparov/kasparovd/ /app/
COPY --from=build /go/src/github.com/kaspanet/kaspad/kasparov/database/migrations/ /database/migrations/

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/kasparovd"]

View File

@@ -1,11 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/kasparov/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// log is the logger instance for the kasparovd ("KVSV") subsystem.
	log = logger.Logger("KVSV")
	// spawn starts goroutines with panic handling wired to log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1,55 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/pkg/errors"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/kaspanet/kaspad/kasparov/database"
"github.com/kaspanet/kaspad/kasparov/jsonrpc"
"github.com/kaspanet/kaspad/kasparov/kasparovd/config"
"github.com/kaspanet/kaspad/kasparov/kasparovd/server"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
)
// main parses the configuration, connects to the database and the JSON-RPC
// server, starts the HTTP REST server, and blocks until interrupted.
func main() {
	defer panics.HandlePanic(log, nil, nil)

	err := config.Parse()
	if err != nil {
		errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
		// Fix: use Fprint rather than Fprintf — errString is not a format
		// string, and a '%' inside the underlying error would corrupt the
		// output (go vet flags this).
		_, fErr := fmt.Fprint(os.Stderr, errString)
		if fErr != nil {
			panic(errString)
		}
		return
	}

	err = database.Connect(&config.ActiveConfig().KasparovFlags)
	if err != nil {
		panic(errors.Errorf("Error connecting to database: %s", err))
	}
	defer func() {
		err := database.Close()
		if err != nil {
			panic(errors.Errorf("Error closing the database: %s", err))
		}
	}()

	err = jsonrpc.Connect(&config.ActiveConfig().KasparovFlags)
	if err != nil {
		panic(errors.Errorf("Error connecting to servers: %s", err))
	}
	defer jsonrpc.Close()

	// Start the REST server and make sure it shuts down gracefully on exit.
	shutdownServer := server.Start(config.ActiveConfig().HTTPListen)
	defer shutdownServer()

	// Block until an interrupt signal is received.
	interrupt := signal.InterruptListener()
	<-interrupt
}

View File

@@ -1,9 +0,0 @@
package server
import "github.com/kaspanet/kaspad/util/panics"
import "github.com/kaspanet/kaspad/kasparov/logger"
var (
	// log is the logger instance for the REST server subsystem.
	log = logger.Logger("REST")
	// spawn starts goroutines with panic handling wired to log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1,178 +0,0 @@
package server
import (
"fmt"
"net/http"
"strconv"
"github.com/kaspanet/kaspad/httpserverutils"
"github.com/kaspanet/kaspad/kasparov/kasparovd/controllers"
"github.com/pkg/errors"
"github.com/gorilla/mux"
)
// Names of the mux route parameters used in the URL patterns below.
const (
	routeParamTxID      = "txID"
	routeParamTxHash    = "txHash"
	routeParamAddress   = "address"
	routeParamBlockHash = "blockHash"
)

// Names of the supported URL query parameters.
const (
	queryParamSkip  = "skip"
	queryParamLimit = "limit"
	queryParamOrder = "order"
)

// Defaults applied when the corresponding query parameter is absent.
const (
	defaultGetTransactionsLimit = 100
	defaultGetBlocksLimit       = 25
	defaultGetBlocksOrder       = controllers.OrderDescending
)
// mainHandler responds to the root route with a static status message.
func mainHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string, _ []byte) (interface{}, error) {
	type statusResponse struct {
		Message string `json:"message"`
	}
	return statusResponse{Message: "Kasparov server is running"}, nil
}
// addRoutes registers all of the server's HTTP routes on the given router.
func addRoutes(router *mux.Router) {
	router.HandleFunc("/", httpserverutils.MakeHandler(mainHandler))

	// registerGet wraps the repeated HandleFunc + MakeHandler + Methods("GET")
	// boilerplate shared by all read-only routes.
	registerGet := func(path string, handler func(*httpserverutils.ServerContext, *http.Request,
		map[string]string, map[string]string, []byte) (interface{}, error)) {
		router.HandleFunc(path, httpserverutils.MakeHandler(handler)).Methods("GET")
	}

	registerGet(fmt.Sprintf("/transaction/id/{%s}", routeParamTxID), getTransactionByIDHandler)
	registerGet(fmt.Sprintf("/transaction/hash/{%s}", routeParamTxHash), getTransactionByHashHandler)
	registerGet(fmt.Sprintf("/transactions/address/{%s}", routeParamAddress), getTransactionsByAddressHandler)
	registerGet(fmt.Sprintf("/utxos/address/{%s}", routeParamAddress), getUTXOsByAddressHandler)
	registerGet(fmt.Sprintf("/block/{%s}", routeParamBlockHash), getBlockByHashHandler)
	registerGet("/blocks", getBlocksHandler)
	registerGet("/fee-estimates", getFeeEstimatesHandler)

	router.HandleFunc(
		"/transaction",
		httpserverutils.MakeHandler(postTransactionHandler)).
		Methods("POST")
}
// convertQueryParamToInt reads the query parameter named param and parses it
// as an int, returning defaultValue when the parameter is absent and a
// 422 handler error when it is present but unparsable.
func convertQueryParamToInt(queryParams map[string]string, param string, defaultValue int) (int, error) {
	rawValue, ok := queryParams[param]
	if !ok {
		return defaultValue, nil
	}
	intValue, err := strconv.Atoi(rawValue)
	if err != nil {
		errorMessage := fmt.Sprintf("Couldn't parse the '%s' query parameter", param)
		return 0, httpserverutils.NewHandlerErrorWithCustomClientMessage(
			http.StatusUnprocessableEntity,
			errors.Wrap(err, errorMessage),
			errorMessage)
	}
	return intValue, nil
}
// getTransactionByIDHandler handles GET /transaction/id/{txID} by delegating
// to the corresponding controller.
func getTransactionByIDHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {
	return controllers.GetTransactionByIDHandler(routeParams[routeParamTxID])
}

// getTransactionByHashHandler handles GET /transaction/hash/{txHash} by
// delegating to the corresponding controller.
func getTransactionByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {
	return controllers.GetTransactionByHashHandler(routeParams[routeParamTxHash])
}
// getTransactionsByAddressHandler handles GET /transactions/address/{address},
// reading optional skip/limit query parameters.
func getTransactionsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, error) {
	skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if err != nil {
		return nil, err
	}
	// Fix: convertQueryParamToInt already parses the limit parameter; the
	// previous version parsed it a second time with strconv.Atoi and a
	// slightly different error, which was redundant.
	limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetTransactionsLimit)
	if err != nil {
		return nil, err
	}
	return controllers.GetTransactionsByAddressHandler(routeParams[routeParamAddress], uint64(skip), uint64(limit))
}
// getUTXOsByAddressHandler handles GET /utxos/address/{address} by delegating
// to the corresponding controller.
func getUTXOsByAddressHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {
	return controllers.GetUTXOsByAddressHandler(routeParams[routeParamAddress])
}

// getBlockByHashHandler handles GET /block/{blockHash} by delegating to the
// corresponding controller.
func getBlockByHashHandler(_ *httpserverutils.ServerContext, _ *http.Request, routeParams map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {
	return controllers.GetBlockByHashHandler(routeParams[routeParamBlockHash])
}

// getFeeEstimatesHandler handles GET /fee-estimates by delegating to the
// corresponding controller.
func getFeeEstimatesHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
	_ []byte) (interface{}, error) {
	return controllers.GetFeeEstimatesHandler()
}
// getBlocksHandler handles GET /blocks, reading optional skip/limit/order
// query parameters.
func getBlocksHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, queryParams map[string]string,
	_ []byte) (interface{}, error) {
	skip, err := convertQueryParamToInt(queryParams, queryParamSkip, 0)
	if err != nil {
		return nil, err
	}
	limit, err := convertQueryParamToInt(queryParams, queryParamLimit, defaultGetBlocksLimit)
	if err != nil {
		return nil, err
	}
	order := defaultGetBlocksOrder
	if orderParamValue, ok := queryParams[queryParamOrder]; ok {
		// Only the two known order values are accepted.
		switch orderParamValue {
		case controllers.OrderAscending, controllers.OrderDescending:
			order = orderParamValue
		default:
			return nil, httpserverutils.NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf(
				"Couldn't parse the '%s' query parameter", queryParamOrder))
		}
	}
	return controllers.GetBlocksHandler(order, uint64(skip), uint64(limit))
}
// postTransactionHandler handles POST /transaction by forwarding the raw
// request body to the corresponding controller; it returns no payload.
func postTransactionHandler(_ *httpserverutils.ServerContext, _ *http.Request, _ map[string]string, _ map[string]string,
	requestBody []byte) (interface{}, error) {
	return nil, controllers.PostTransaction(requestBody)
}

View File

@@ -1,40 +0,0 @@
package server
import (
"context"
"github.com/kaspanet/kaspad/httpserverutils"
"net/http"
"time"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
// gracefulShutdownTimeout bounds how long Shutdown waits for in-flight
// requests before giving up.
const gracefulShutdownTimeout = 30 * time.Second

// Start starts the HTTP REST server and returns a
// function to gracefully shutdown it.
func Start(listenAddr string) func() {
	router := mux.NewRouter()
	router.Use(httpserverutils.AddRequestMetadataMiddleware)
	router.Use(httpserverutils.RecoveryMiddleware)
	router.Use(httpserverutils.LoggingMiddleware)
	router.Use(httpserverutils.SetJSONMiddleware)
	addRoutes(router)

	server := &http.Server{
		Addr:    listenAddr,
		Handler: handlers.CORS()(router),
	}
	spawn(func() {
		// ListenAndServe always returns a non-nil error, including on shutdown.
		log.Errorf("%s", server.ListenAndServe())
	})

	return func() {
		ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTimeout)
		defer cancel()
		if err := server.Shutdown(ctx); err != nil {
			log.Errorf("Error shutting down HTTP server: %s", err)
		}
	}
}

View File

@@ -1,55 +0,0 @@
package config
import (
"github.com/jessevdk/go-flags"
"github.com/kaspanet/kaspad/kasparov/config"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
)
const (
logFilename = "kasparov_syncd.log"
errLogFilename = "kasparov_syncd_err.log"
)
var (
// Default configuration options
defaultLogDir = util.AppDataDir("kasparov_syncd", false)
activeConfig *Config
)
// ActiveConfig returns the active configuration struct
// (populated by Parse; nil until Parse has been called).
func ActiveConfig() *Config {
	return activeConfig
}
// Config defines the configuration options for the sync daemon.
type Config struct {
	Migrate bool `long:"migrate" description:"Migrate the database to the latest version. The daemon will not start when using this flag."`
	// The MQTT options must be passed all together or not at all;
	// Parse enforces this.
	MQTTBrokerAddress string `long:"mqttaddress" description:"MQTT broker address" required:"false"`
	MQTTUser          string `long:"mqttuser" description:"MQTT server user" required:"false"`
	MQTTPassword      string `long:"mqttpass" description:"MQTT server password" required:"false"`
	// KasparovFlags holds the flags shared by all Kasparov executables.
	config.KasparovFlags
}
// Parse parses the CLI arguments and returns a config struct.
func Parse() error {
activeConfig = &Config{}
parser := flags.NewParser(activeConfig, flags.PrintErrors|flags.HelpFlag)
_, err := parser.Parse()
if err != nil {
return err
}
err = activeConfig.ResolveKasparovFlags(parser, defaultLogDir, logFilename, errLogFilename)
if err != nil {
return err
}
if (activeConfig.MQTTBrokerAddress != "" || activeConfig.MQTTUser != "" || activeConfig.MQTTPassword != "") &&
(activeConfig.MQTTBrokerAddress == "" || activeConfig.MQTTUser == "" || activeConfig.MQTTPassword == "") {
return errors.New("--mqttaddress, --mqttuser, and --mqttpass must be passed all together")
}
return nil
}

View File

@@ -1,29 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

# Create and enter the kaspad source directory inside GOPATH.
RUN mkdir -p /go/src/github.com/kaspanet/kaspad
WORKDIR /go/src/github.com/kaspanet/kaspad

RUN apk add --no-cache curl git

# Copy only the module files first so the dependency-download layer is
# cached independently of source changes.
COPY go.mod .
COPY go.sum .
RUN go mod download

COPY . .

# Build a statically-linked kasparov-syncd binary (CGO disabled).
RUN cd kasparov/kasparovsyncd && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o kasparov-syncd .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app

# tini runs as PID 1 to forward signals and reap zombie processes.
RUN apk add --no-cache tini

COPY --from=build /go/src/github.com/kaspanet/kaspad/kasparov/kasparovsyncd/ /app/
COPY --from=build /go/src/github.com/kaspanet/kaspad/kasparov/database/migrations/ /database/migrations/

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/kasparov-syncd"]

View File

@@ -1,11 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/kasparov/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var (
	// log is the logger instance for the kasparov sync daemon ("KVSD").
	log = logger.Logger("KVSD")
	// spawn starts goroutines with panic handling wired to log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1,76 +0,0 @@
package main
import (
"fmt"
"os"
_ "github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/kaspanet/kaspad/kasparov/database"
"github.com/kaspanet/kaspad/kasparov/jsonrpc"
"github.com/kaspanet/kaspad/kasparov/kasparovsyncd/config"
"github.com/kaspanet/kaspad/kasparov/kasparovsyncd/mqtt"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
"github.com/pkg/errors"
)
// main parses the configuration, optionally runs database migrations,
// connects to the database, MQTT and the JSON-RPC server, starts the sync
// loop, and blocks until interrupted.
func main() {
	defer panics.HandlePanic(log, nil, nil)

	err := config.Parse()
	if err != nil {
		errString := fmt.Sprintf("Error parsing command-line arguments: %s", err)
		// Fix: use Fprint rather than Fprintf — errString is not a format
		// string, and a '%' inside the underlying error would corrupt the
		// output (go vet flags this).
		_, fErr := fmt.Fprint(os.Stderr, errString)
		if fErr != nil {
			panic(errString)
		}
		return
	}

	// In migration mode the daemon migrates the database and exits.
	if config.ActiveConfig().Migrate {
		err := database.Migrate(&config.ActiveConfig().KasparovFlags)
		if err != nil {
			panic(errors.Errorf("Error migrating database: %s", err))
		}
		return
	}

	err = database.Connect(&config.ActiveConfig().KasparovFlags)
	if err != nil {
		panic(errors.Errorf("Error connecting to database: %s", err))
	}
	defer func() {
		err := database.Close()
		if err != nil {
			panic(errors.Errorf("Error closing the database: %s", err))
		}
	}()

	err = mqtt.Connect()
	if err != nil {
		panic(errors.Errorf("Error connecting to MQTT: %s", err))
	}
	defer mqtt.Close()

	err = jsonrpc.Connect(&config.ActiveConfig().KasparovFlags)
	if err != nil {
		panic(errors.Errorf("Error connecting to servers: %s", err))
	}
	defer jsonrpc.Close()

	doneChan := make(chan struct{}, 1)
	spawn(func() {
		err := startSync(doneChan)
		if err != nil {
			panic(err)
		}
	})

	interrupt := signal.InterruptListener()
	<-interrupt

	// Gracefully stop syncing
	doneChan <- struct{}{}
}

View File

@@ -1,9 +0,0 @@
package mqtt
import "github.com/kaspanet/kaspad/util/panics"
import "github.com/kaspanet/kaspad/kasparov/logger"
var (
	// log is the MQTT subsystem logger (tag "MQTT").
	log = logger.Logger("MQTT")
	// spawn wraps goroutine launches so panics are logged via log.
	spawn = panics.GoroutineWrapperFunc(log)
)

View File

@@ -1,75 +0,0 @@
package mqtt
import (
"encoding/json"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/kaspanet/kaspad/kasparov/kasparovsyncd/config"
"github.com/pkg/errors"
)
// client is an instance of the MQTT client, in case we have an active connection
var client mqtt.Client

const (
	// qualityOfService is the MQTT QoS level used when publishing (QoS 2).
	qualityOfService = 2
	// quiesceMilliseconds is the grace period passed to Disconnect so
	// in-flight work can complete before the connection is torn down.
	quiesceMilliseconds = 250
)
// GetClient returns the active MQTT client, or an error when no connection
// has previously been established via Connect.
func GetClient() (mqtt.Client, error) {
	if client != nil {
		return client, nil
	}
	return nil, errors.New("MQTT is not connected")
}
// isConnected reports whether an MQTT client connection is currently active.
func isConnected() bool {
	return client != nil
}
// Connect initiates a connection to the MQTT server, if defined
func Connect() error {
	cfg := config.ActiveConfig()
	if cfg.MQTTBrokerAddress == "" {
		// MQTT broker not defined -- nothing to do
		return nil
	}

	// Build the client options via the fluent chaining API.
	options := mqtt.NewClientOptions().
		AddBroker(cfg.MQTTBrokerAddress).
		SetUsername(cfg.MQTTUser).
		SetPassword(cfg.MQTTPassword).
		SetAutoReconnect(true)

	newClient := mqtt.NewClient(options)
	if token := newClient.Connect(); token.Wait() && token.Error() != nil {
		return token.Error()
	}

	// Publish the client only after the connection succeeded.
	client = newClient
	return nil
}
// Close closes the connection to the MQTT server, if previously connected
func Close() {
if client == nil {
return
}
client.Disconnect(quiesceMilliseconds)
client = nil
}
func publish(topic string, data interface{}) error {
payload, err := json.Marshal(data)
if err != nil {
return err
}
token := client.Publish(topic, qualityOfService, false, payload)
token.Wait()
if token.Error() != nil {
return errors.WithStack(token.Error())
}
return nil
}

View File

@@ -1,17 +0,0 @@
package mqtt
import "github.com/kaspanet/kaspad/kasparov/kasparovd/controllers"
// selectedTipTopic is the MQTT topic on which new selected tips are published.
const selectedTipTopic = "dag/selected-tip"

// PublishSelectedTipNotification publishes notification for a new selected tip
func PublishSelectedTipNotification(selectedTipHash string) error {
	if !isConnected() {
		// No active MQTT connection -- silently skip publishing.
		return nil
	}
	blockData, err := controllers.GetBlockByHashHandler(selectedTipHash)
	if err != nil {
		return err
	}
	return publish(selectedTipTopic, blockData)
}

View File

@@ -1,117 +0,0 @@
package mqtt
import (
"github.com/kaspanet/kaspad/kasparov/kasparovd/apimodels"
"github.com/kaspanet/kaspad/kasparov/kasparovd/controllers"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util/daghash"
"path"
)
// PublishTransactionsNotifications publishes notification for each transaction of the given block
func PublishTransactionsNotifications(rawTransactions []rpcmodel.TxRawResult) error {
	if !isConnected() {
		return nil
	}

	// Collect the IDs of every transaction in the block.
	ids := make([]string, 0, len(rawTransactions))
	for _, rawTx := range rawTransactions {
		ids = append(ids, rawTx.TxID)
	}

	transactions, err := controllers.GetTransactionsByIDsHandler(ids)
	if err != nil {
		return err
	}

	for _, tx := range transactions {
		if err := publishTransactionNotifications(tx, "transactions"); err != nil {
			return err
		}
	}
	return nil
}
// publishTransactionNotifications publishes the given transaction once per
// distinct address it involves, on the per-address subtopic of topic.
func publishTransactionNotifications(transaction *apimodels.TransactionResponse, topic string) error {
	for _, address := range uniqueAddressesForTransaction(transaction) {
		if err := publishTransactionNotificationForAddress(transaction, address, topic); err != nil {
			return err
		}
	}
	return nil
}
// uniqueAddressesForTransaction returns every distinct address that appears
// in the transaction's outputs and inputs, outputs first, preserving
// first-seen order.
func uniqueAddressesForTransaction(transaction *apimodels.TransactionResponse) []string {
	seen := make(map[string]struct{})
	addresses := []string{}
	add := func(address string) {
		if _, ok := seen[address]; !ok {
			seen[address] = struct{}{}
			addresses = append(addresses, address)
		}
	}
	for _, output := range transaction.Outputs {
		add(output.Address)
	}
	for _, input := range transaction.Inputs {
		add(input.Address)
	}
	return addresses
}
// publishTransactionNotificationForAddress publishes the transaction on the
// address-specific subtopic "<topic>/<address>".
func publishTransactionNotificationForAddress(transaction *apimodels.TransactionResponse, address string, topic string) error {
	return publish(path.Join(topic, address), transaction)
}
// PublishAcceptedTransactionsNotifications publishes notification for each accepted transaction of the given chain-block
func PublishAcceptedTransactionsNotifications(addedChainBlocks []*rpcclient.ChainBlock) error {
	for _, addedChainBlock := range addedChainBlocks {
		for _, acceptedBlock := range addedChainBlock.AcceptedBlocks {
			transactionIDs := make([]string, len(acceptedBlock.AcceptedTxIDs))
			for i, acceptedTxID := range acceptedBlock.AcceptedTxIDs {
				transactionIDs[i] = acceptedTxID.String()
			}
			transactions, err := controllers.GetTransactionsByIDsHandler(transactionIDs)
			if err != nil {
				return err
			}
			for _, transaction := range transactions {
				err = publishTransactionNotifications(transaction, "transactions/accepted")
				if err != nil {
					return err
				}
			}
			// NOTE: an unconditional `return nil` previously sat here, which
			// aborted the function after the first accepted block and silently
			// skipped notifications for all remaining accepted blocks.
		}
	}
	return nil
}
// PublishUnacceptedTransactionsNotifications publishes notification for each unaccepted transaction of the given chain-block
func PublishUnacceptedTransactionsNotifications(removedChainHashes []*daghash.Hash) error {
	for _, removedHash := range removedChainHashes {
		// Look up which transactions had been accepted by the removed block.
		transactionIDs, err := controllers.GetAcceptedTransactionIDsByBlockHashHandler(removedHash.String())
		if err != nil {
			return err
		}
		transactions, err := controllers.GetTransactionsByIDsHandler(transactionIDs)
		if err != nil {
			return err
		}
		for _, tx := range transactions {
			if err := publishTransactionNotifications(tx, "transactions/unaccepted"); err != nil {
				return err
			}
		}
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,47 +0,0 @@
package logger
import (
"fmt"
"github.com/kaspanet/kaspad/logs"
"github.com/pkg/errors"
"os"
)
// BackendLog is the logging backend used to create all subsystem loggers.
var BackendLog = logs.NewBackend()

// loggers tracks every logger created through Logger so SetLogLevels can
// adjust all of them later.
var loggers []logs.Logger
// InitLog attaches log file and error log file to the backend log.
func InitLog(logFile, errLogFile string) {
err := BackendLog.AddLogFile(logFile, logs.LevelTrace)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
os.Exit(1)
}
err = BackendLog.AddLogFile(errLogFile, logs.LevelWarn)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
os.Exit(1)
}
}
// Logger returns a new logger for a particular subsystem that writes to
// BackendLog, and add it to a slice so it will be possible to access it
// later and change its log level
func Logger(subsystemTag string) logs.Logger {
	logger := BackendLog.Logger(subsystemTag)
	// Remember the logger so SetLogLevels can update it later.
	loggers = append(loggers, logger)
	return logger
}
// SetLogLevels sets the logging level for all of the subsystems in Kasparov.
func SetLogLevels(level string) error {
	parsedLevel, ok := logs.LevelFromString(level)
	if !ok {
		return errors.Errorf("Invalid log level %s", level)
	}
	// Apply the new level to every logger registered via Logger.
	for _, subsystemLogger := range loggers {
		subsystemLogger.SetLevel(parsedLevel)
	}
	return nil
}

View File

@@ -1,65 +0,0 @@
package main
import (
"bufio"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/pkg/errors"
)
// instanceStateCodeActive is the EC2 instance-state-code of running instances.
const instanceStateCodeActive = "16"

// getAddressList resolves the node RPC address list, preferring an explicit
// address-list file over AWS autoscaling-group discovery.
func getAddressList(cfg *config) ([]string, error) {
	if cfg.AddressListPath == "" {
		return getAddressListFromAWS(cfg)
	}
	return getAddressListFromPath(cfg)
}
// getAddressListFromAWS queries EC2 for the running instances of the
// configured autoscaling group and returns their private-DNS RPC endpoints.
func getAddressListFromAWS(cfg *config) ([]string, error) {
	log.Infof("Getting hosts list for autoscaling group %s", cfg.AutoScalingGroup)
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String(cfg.Region)}))
	ec2Client := ec2.New(sess)

	// Match running instances belonging to the configured autoscaling group.
	filters := []*ec2.Filter{
		{Name: aws.String("tag:aws:autoscaling:groupName"), Values: []*string{&cfg.AutoScalingGroup}},
		{Name: aws.String("instance-state-code"), Values: []*string{aws.String(instanceStateCodeActive)}},
	}
	instances, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{Filters: filters})
	if err != nil {
		return nil, errors.Wrap(err, "Error describing instances")
	}

	addresses := []string{}
	for _, reservation := range instances.Reservations {
		for _, instance := range reservation.Instances {
			if instance.PrivateDnsName == nil {
				// An instance without a private DNS name cannot be dialed.
				continue
			}
			addresses = append(addresses, fmt.Sprintf("%s:%s", *instance.PrivateDnsName, dagconfig.DevNetParams.RPCPort))
		}
	}
	return addresses, nil
}
// getAddressListFromPath reads a newline-separated list of node addresses
// from the file at cfg.AddressListPath.
func getAddressListFromPath(cfg *config) ([]string, error) {
	file, err := os.Open(cfg.AddressListPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	addressList := []string{}
	for scanner.Scan() {
		addressList = append(addressList, scanner.Text())
	}
	// Scan swallows read errors; surface them so a truncated or unreadable
	// file is not silently treated as a complete address list.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return addressList, nil
}

View File

@@ -1,38 +0,0 @@
package main
import (
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
// simulatorClient is an RPC client connection to a single kaspad node,
// augmented with block-added notification plumbing for the mining loop.
type simulatorClient struct {
	*rpcclient.Client
	// onBlockAdded receives a signal for each block-added notification, but
	// only while notifyForNewBlocks is true.
	onBlockAdded chan struct{}
	// notifyForNewBlocks gates forwarding of block-added notifications; the
	// mine loop enables it only on the client currently being mined on.
	notifyForNewBlocks bool
}
// newSimulatorClient dials the JSON-RPC endpoint at address, wires up a
// block-added notification handler, and subscribes to block notifications.
func newSimulatorClient(address string, connCfg *rpcclient.ConnConfig) (*simulatorClient, error) {
	client := &simulatorClient{
		onBlockAdded: make(chan struct{}, 1),
	}
	handlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
			txs []*util.Tx) {
			// Forward the signal only while a mining round is listening.
			if client.notifyForNewBlocks {
				client.onBlockAdded <- struct{}{}
			}
		},
	}

	rpcClient, err := rpcclient.New(connCfg, handlers)
	if err != nil {
		return nil, errors.Errorf("Error connecting to address %s: %s", address, err)
	}
	client.Client = rpcClient

	if err := client.NotifyBlocks(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
	}
	return client, nil
}

View File

@@ -1,66 +0,0 @@
package main
import (
"path/filepath"
"github.com/kaspanet/kaspad/util"
"github.com/pkg/errors"
"github.com/jessevdk/go-flags"
)
const (
	// defaultLogFilename is the name of the main (trace-level) log file.
	defaultLogFilename = "miningsimulator.log"
	// defaultErrLogFilename is the name of the warnings-and-above log file.
	defaultErrLogFilename = "miningsimulator_err.log"
)

var (
	// Default configuration options
	defaultHomeDir = util.AppDataDir("miningsimulator", false)
	defaultLogFile = filepath.Join(defaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
)
// config holds the mining simulator's command-line options. Exactly one of
// (AutoScalingGroup with Region) or AddressListPath must be given, and
// CertificatePath/DisableTLS are mutually exclusive (validated by parseConfig).
type config struct {
	AutoScalingGroup string `long:"autoscaling" description:"AWS AutoScalingGroup to use for address list"`
	Region string `long:"region" description:"AWS region to use for address list"`
	AddressListPath string `long:"addresslist" description:"Path to a list of nodes' JSON-RPC endpoints"`
	CertificatePath string `long:"cert" description:"Path to certificate accepted by JSON-RPC endpoint"`
	DisableTLS bool `long:"notls" description:"Disable TLS"`
	Verbose bool `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
	BlockDelay uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds)"`
}
// parseConfig parses the command-line flags into a config struct, validates
// that the TLS and address-source options are consistent, and initializes
// logging to the default log files.
func parseConfig() (*config, error) {
	cfg := &config{}
	parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
	_, err := parser.Parse()
	if err != nil {
		return nil, err
	}

	// TLS is on by default, so a certificate is mandatory unless the user
	// explicitly opted out with --notls. The previous message here ("--notls
	// has to be disabled if --cert is used") contradicted this condition,
	// which fires precisely when --cert is NOT used.
	if cfg.CertificatePath == "" && !cfg.DisableTLS {
		return nil, errors.New("--cert is required when TLS is enabled; pass --notls to disable TLS")
	}
	if cfg.CertificatePath != "" && cfg.DisableTLS {
		return nil, errors.New("--cert should be omitted if --notls is used")
	}

	// Exactly one address source must be chosen: AWS autoscaling discovery
	// (--autoscaling plus --region) or a static --addresslist file.
	if (cfg.AutoScalingGroup == "" || cfg.Region == "") && cfg.AddressListPath == "" {
		return nil, errors.New("Either (--autoscaling and --region) or --addresslist must be specified")
	}
	if (cfg.AutoScalingGroup != "" || cfg.Region != "") && cfg.AddressListPath != "" {
		return nil, errors.New("Both (--autoscaling and --region) and --addresslist can't be specified at the same time")
	}
	if cfg.AutoScalingGroup != "" && cfg.Region == "" {
		return nil, errors.New("If --autoscaling is specified --region must be specified as well")
	}

	initLog(defaultLogFile, defaultErrLogFile)

	return cfg, nil
}

View File

@@ -1,149 +0,0 @@
package main
import (
"io/ioutil"
"time"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
)
// connectionManager owns the set of RPC client connections to the mining
// nodes and, in autoscaling mode, keeps that set in sync with the group.
type connectionManager struct {
	addressList []string // current node addresses ("host:port")
	cert []byte // TLS certificate; nil when TLS is disabled
	clients []*simulatorClient // one connected client per address
	cfg *config
	// disconnectChan stops refreshAddressesLoop; nil unless autoscaling mode.
	disconnectChan chan struct{}
}
// newConnectionManager builds a connectionManager from cfg: it resolves the
// address list, loads the TLS certificate (if any), connects to every server
// and, in autoscaling mode, starts a background address-refresh loop.
func newConnectionManager(cfg *config) (*connectionManager, error) {
	connManager := &connectionManager{cfg: cfg}

	addressList, err := getAddressList(cfg)
	if err != nil {
		return nil, err
	}
	connManager.addressList = addressList

	cert, err := readCert(cfg)
	if err != nil {
		return nil, err
	}
	connManager.cert = cert

	clients, err := connectToServers(connManager.addressList, connManager.cert)
	if err != nil {
		return nil, err
	}
	connManager.clients = clients

	if cfg.AutoScalingGroup != "" {
		connManager.disconnectChan = make(chan struct{})
		spawn(func() { connManager.refreshAddressesLoop() })
	}

	return connManager, nil
}
// connectToServer opens a single simulator client connection to address,
// using the given TLS certificate (nil means TLS is disabled).
func connectToServer(address string, cert []byte) (*simulatorClient, error) {
	client, err := newSimulatorClient(address, &rpcclient.ConnConfig{
		Host:           address,
		Endpoint:       "ws",
		User:           "user",
		Pass:           "pass",
		DisableTLS:     cert == nil,
		RequestTimeout: time.Second * 10,
		Certificates:   cert,
	})
	if err != nil {
		return nil, err
	}
	log.Infof("Connected to server %s", address)
	return client, nil
}
// connectToServers connects to every address in addressList, failing fast on
// the first connection error.
func connectToServers(addressList []string, cert []byte) ([]*simulatorClient, error) {
	clients := make([]*simulatorClient, len(addressList))
	for i, address := range addressList {
		client, err := connectToServer(address, cert)
		if err != nil {
			return nil, err
		}
		clients[i] = client
	}
	return clients, nil
}
// readCert loads the RPC TLS certificate from the configured path, returning
// nil when TLS is disabled.
func readCert(cfg *config) ([]byte, error) {
	if cfg.DisableTLS {
		// No TLS -- no certificate to read.
		return nil, nil
	}
	cert, err := ioutil.ReadFile(cfg.CertificatePath)
	if err == nil {
		return cert, nil
	}
	return nil, errors.Errorf("Error reading certificates file: %s", err)
}
// close stops the address-refresh loop (when one is running) and disconnects
// every client.
func (cm *connectionManager) close() {
	if cm.disconnectChan != nil {
		// Unbuffered channel: this blocks until refreshAddressesLoop
		// receives the signal and exits.
		cm.disconnectChan <- struct{}{}
	}
	for _, client := range cm.clients {
		client.Disconnect()
	}
}
// refreshAddressInterval is how often the autoscaling address list is re-read.
const refreshAddressInterval = time.Minute * 10

// refreshAddressesLoop periodically refreshes the address list until a
// signal arrives on disconnectChan.
func (cm *connectionManager) refreshAddressesLoop() {
	for {
		select {
		case <-cm.disconnectChan:
			return
		case <-time.After(refreshAddressInterval):
			if err := cm.refreshAddresses(); err != nil {
				panic(err)
			}
		}
	}
}
// refreshAddresses re-reads the address list and connects to any address not
// already known. Clients of removed addresses are kept as-is.
// NOTE(review): stale clients are never disconnected here -- pre-existing
// behavior, preserved; confirm whether removed nodes should be dropped.
func (cm *connectionManager) refreshAddresses() error {
	newAddressList, err := getAddressList(cm.cfg)
	if err != nil {
		return err
	}

	// Index the currently-known addresses so new ones are detected even when
	// the list length is unchanged (e.g. one node replaced by another). The
	// previous length-equality short-circuit missed exactly that case.
	known := make(map[string]struct{}, len(cm.addressList))
	for _, address := range cm.addressList {
		known[address] = struct{}{}
	}

	for _, newAddress := range newAddressList {
		if _, ok := known[newAddress]; ok {
			continue
		}
		client, err := connectToServer(newAddress, cm.cert)
		if err != nil {
			return err
		}
		cm.clients = append(cm.clients, client)
	}
	cm.addressList = newAddressList
	return nil
}

View File

@@ -1,28 +0,0 @@
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

WORKDIR /go/src/github.com/kaspanet/kaspad

RUN apk add --no-cache curl git

# Copy the module files and download dependencies first, so this layer is
# cached independently of source-code edits.
COPY go.mod .
COPY go.sum .
RUN go mod download

COPY . .

# Static (CGO-free) linux build so the binary runs on the bare alpine image.
RUN cd mining/simulator && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o mining_simulator .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app
RUN apk add --no-cache tini

COPY --from=build /go/src/github.com/kaspanet/kaspad/mining/simulator/mining_simulator /app/

# tini acts as PID 1, forwarding signals to the simulator process.
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/mining_simulator"]

View File

@@ -1,10 +0,0 @@
1. To build the docker image, invoke the following command from the kaspad root directory:
docker build -t mining_simulator -f ./mining/simulator/docker/Dockerfile .
2. To run:
a. create folder ~/.btcd/mining_simulator with the following files:
rpc.cert - certificate file that all rpc nodes accept
addresses - list of node addresses in the format [hostname]:[port]. One node per line
b. run:
docker run -v ~/.btcd:/root/.btcd -t mining_simulator

View File

@@ -1,34 +0,0 @@
package main
import (
"fmt"
"github.com/kaspanet/kaspad/logs"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/kaspanet/kaspad/util/panics"
"os"
)
var (
	// backendLog is the logging backend all of this program's loggers write to.
	backendLog = logs.NewBackend()
	// log is the mining simulator's main logger (tag "MNSM").
	log = backendLog.Logger("MNSM")
	// spawn wraps goroutine launches so panics are logged via log.
	spawn = panics.GoroutineWrapperFunc(log)
)
func initLog(logFile, errLogFile string) {
err := backendLog.AddLogFile(logFile, logs.LevelTrace)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
os.Exit(1)
}
err = backendLog.AddLogFile(errLogFile, logs.LevelWarn)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
os.Exit(1)
}
}
// enableRPCLogging routes the rpcclient library's logging through this
// program's backend under the "RPCC" tag, at trace level.
func enableRPCLogging() {
	rpcLogger := backendLog.Logger("RPCC")
	rpcLogger.SetLevel(logs.LevelTrace)
	rpcclient.UseLogger(rpcLogger)
}

View File

@@ -1,46 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/signal"
"github.com/kaspanet/kaspad/util/panics"
)
// main is the mining simulator entry point: it parses configuration,
// connects to the configured kaspad nodes, and mines blocks in a background
// goroutine until an OS interrupt arrives.
func main() {
	defer panics.HandlePanic(log, nil, nil)

	cfg, err := parseConfig()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s", err)
		os.Exit(1)
	}

	if cfg.Verbose {
		enableRPCLogging()
	}

	connManager, err := newConnectionManager(cfg)
	if err != nil {
		panic(errors.Errorf("Error initializing connection manager: %s", err))
	}
	defer connManager.close()

	spawn(func() {
		// Use a goroutine-local error: the previous code assigned to the
		// outer `err` from this goroutine, an unsynchronized cross-goroutine
		// write to a variable the main goroutine also uses.
		if err := mineLoop(connManager, cfg.BlockDelay); err != nil {
			panic(errors.Errorf("Error in main loop: %s", err))
		}
	})

	interrupt := signal.InterruptListener()
	<-interrupt
}
// disconnect closes the RPC connection of every client in the list.
func disconnect(clients []*simulatorClient) {
	for i := range clients {
		clients[i].Disconnect()
	}
}

View File

@@ -1,207 +0,0 @@
package main
import (
"encoding/hex"
nativeerrors "errors"
"math/rand"
"strconv"
"strings"
"time"
"github.com/kaspanet/kaspad/rpcclient"
"github.com/pkg/errors"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// random drives nonce starting points and client selection; seeded with the
// current time (not cryptographically secure -- fine for a simulator).
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
// parseBlock converts a getblocktemplate RPC result into a util.Block ready
// for nonce grinding: it decodes parent hashes, compact difficulty bits and
// the merkle commitments, decodes every transaction (coinbase first), then
// recomputes the hash-merkle root over the assembled transactions.
func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) {
	// parse parent hashes
	parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
	for i, parentHash := range template.ParentHashes {
		hash, err := daghash.NewHashFromStr(parentHash)
		if err != nil {
			return nil, errors.Errorf("Error decoding hash %s: %s", parentHash, err)
		}
		parentHashes[i] = hash
	}

	// parse Bits (hex-encoded, fits in 32 bits)
	bitsInt64, err := strconv.ParseInt(template.Bits, 16, 32)
	if err != nil {
		return nil, errors.Errorf("Error decoding bits %s: %s", template.Bits, err)
	}
	bits := uint32(bitsInt64)

	// parseAcceptedIDMerkleRoot
	acceptedIDMerkleRoot, err := daghash.NewHashFromStr(template.AcceptedIDMerkleRoot)
	if err != nil {
		return nil, errors.Errorf("Error parsing acceptedIDMerkleRoot: %s", err)
	}
	utxoCommitment, err := daghash.NewHashFromStr(template.UTXOCommitment)
	if err != nil {
		return nil, errors.Errorf("Error parsing utxoCommitment: %s", err)
	}

	// parse rest of block; the hash-merkle root is left zero here and is
	// filled in below once all transactions are decoded.
	msgBlock := wire.NewMsgBlock(
		wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{},
			acceptedIDMerkleRoot, utxoCommitment, bits, 0))

	// Decode the coinbase transaction followed by the regular transactions,
	// preserving template order.
	for i, txResult := range append([]rpcmodel.GetBlockTemplateResultTx{*template.CoinbaseTxn}, template.Transactions...) {
		reader := hex.NewDecoder(strings.NewReader(txResult.Data))
		tx := &wire.MsgTx{}
		if err := tx.KaspaDecode(reader, 0); err != nil {
			return nil, errors.Errorf("Error decoding tx #%d: %s", i, err)
		}
		msgBlock.AddTransaction(tx)
	}

	block := util.NewBlock(msgBlock)
	msgBlock.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
	return block, nil
}
// solveBlock grinds nonces for the given block until either a nonce whose
// hash meets the target difficulty is found (the block is then sent on
// foundBlock) or stopChan is closed.
func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util.Block) {
	msgBlock := block.MsgBlock()
	targetDifficulty := util.CompactToBig(msgBlock.Header.Bits)

	// Start at a random nonce and sweep the whole uint64 space (wrapping
	// around) until just before the starting point. Previously the loop
	// started from a SECOND random draw, so the termination condition did
	// not correspond to the starting nonce.
	initialNonce := random.Uint64()
	for i := initialNonce; i != initialNonce-1; i++ {
		select {
		case <-stopChan:
			return
		default:
			msgBlock.Header.Nonce = i
			hash := msgBlock.BlockHash()
			if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
				foundBlock <- block
				return
			}
		}
	}
}
// getBlockTemplate requests a block template (including the coinbase
// transaction) from client, long-polling on longPollID when it is non-empty.
func getBlockTemplate(client *simulatorClient, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
	return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID)
}
// templatesLoop repeatedly requests block templates from client using long
// polling, pushing every template with a new longPollID onto newTemplateChan.
// It re-requests on block-added notifications and on a 500ms fallback tick,
// and exits (closing newTemplateChan) when stopChan is closed.
func templatesLoop(client *simulatorClient, newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
	longPollID := ""
	getBlockTemplateLongPoll := func() {
		if longPollID != "" {
			log.Infof("Requesting template with longPollID '%s' from %s", longPollID, client.Host())
		} else {
			log.Infof("Requesting template without longPollID from %s", client.Host())
		}
		template, err := getBlockTemplate(client, longPollID)
		if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) {
			log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host())
			return
		} else if err != nil {
			errChan <- errors.Errorf("Error getting block template: %s", err)
			return
		}
		if template.LongPollID != longPollID {
			log.Infof("Got new long poll template: %s", template.LongPollID)
			longPollID = template.LongPollID
			newTemplateChan <- template
		}
	}
	getBlockTemplateLongPoll()

	// Use one reusable ticker: the previous `time.Tick(...)` call inside the
	// select allocated a brand-new, never-stopped ticker on every loop
	// iteration, leaking them all.
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stopChan:
			close(newTemplateChan)
			return
		case <-client.onBlockAdded:
			getBlockTemplateLongPoll()
		case <-ticker.C:
			getBlockTemplateLongPoll()
		}
	}
}
// solveLoop consumes templates from newTemplateChan, canceling the solver of
// the previous template (by closing its stop channel) before launching a new
// solver goroutine. Solved blocks are delivered on foundBlock; a parse
// failure is reported on errChan and ends the loop.
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
	var stopOldTemplateSolving chan struct{}
	for template := range newTemplateChan {
		if stopOldTemplateSolving != nil {
			// Stop grinding the now-stale template.
			close(stopOldTemplateSolving)
		}
		stopOldTemplateSolving = make(chan struct{})
		block, err := parseBlock(template)
		if err != nil {
			errChan <- errors.Errorf("Error parsing block: %s", err)
			return
		}
		go solveBlock(block, stopOldTemplateSolving, foundBlock)
	}
	// newTemplateChan was closed: stop the last solver before returning.
	if stopOldTemplateSolving != nil {
		close(stopOldTemplateSolving)
	}
}
// mineNextBlock starts the template/solve pipeline for one mining round on
// client: templatesLoop feeds templates to solveLoop, which delivers a
// solved block on foundBlock. Signaling templateStopChan ends templatesLoop,
// which closes the template channel and thereby ends solveLoop.
func mineNextBlock(client *simulatorClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
	newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
	go templatesLoop(client, newTemplateChan, errChan, templateStopChan)
	go solveLoop(newTemplateChan, foundBlock, errChan)
}
// handleFoundBlock submits a freshly-solved block to the client it was mined
// for, returning an error if the node rejects the submission.
func handleFoundBlock(client *simulatorClient, block *util.Block) error {
	log.Infof("Found block %s with parents %s! Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host())

	if err := client.SubmitBlock(block, &rpcmodel.SubmitBlockOptions{}); err != nil {
		return errors.Errorf("Error submitting block %s to %s: %s", block.Hash(), client.Host(), err)
	}
	return nil
}
// getRandomClient picks one of the connected clients uniformly at random.
func getRandomClient(clients []*simulatorClient) *simulatorClient {
	count := int64(len(clients))
	if count == 1 {
		return clients[0]
	}
	return clients[random.Int63n(count)]
}
// mineLoop mines blocks in rounds: each round picks a random client, mines
// one block on it, then submits the block in the background (after an
// optional delay) while the next round begins. It returns the first value
// received on errChan -- a submission/template/parse error, or nil.
func mineLoop(connManager *connectionManager, blockDelay uint64) error {
	errChan := make(chan error)

	templateStopChan := make(chan struct{})

	spawn(func() {
		for {
			foundBlock := make(chan *util.Block)
			currentClient := getRandomClient(connManager.clients)
			// Only the client being mined on forwards block-added
			// notifications (see simulatorClient.notifyForNewBlocks).
			currentClient.notifyForNewBlocks = true
			log.Infof("Next block will be mined by: %s", currentClient.Host())

			mineNextBlock(currentClient, foundBlock, templateStopChan, errChan)

			block, ok := <-foundBlock
			if !ok {
				// foundBlock closed: end mining with a nil "error".
				// NOTE(review): no sender in this file ever closes
				// foundBlock -- confirm whether this branch is reachable.
				errChan <- nil
				return
			}
			currentClient.notifyForNewBlocks = false

			// Stop this round's templatesLoop before starting the next round.
			templateStopChan <- struct{}{}

			spawn(func() {
				if blockDelay != 0 {
					time.Sleep(time.Duration(blockDelay) * time.Millisecond)
				}
				err := handleFoundBlock(currentClient, block)
				if err != nil {
					errChan <- err
				}
			})
		}
	})

	err := <-errChan
	return err
}