mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-03-30 15:08:33 +00:00
[NOD-495] Remove miningsimulator to separate repository
This commit is contained in:
parent
b794254df6
commit
b4a38f8f60
@ -1,65 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/kaspanet/kaspad/dagconfig"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const instanceStateCodeActive = "16"
|
||||
|
||||
func getAddressList(cfg *config) ([]string, error) {
|
||||
if cfg.AddressListPath != "" {
|
||||
return getAddressListFromPath(cfg)
|
||||
}
|
||||
return getAddressListFromAWS(cfg)
|
||||
}
|
||||
|
||||
// getAddressListFromAWS discovers node addresses by querying EC2 for the
// running instances of the configured autoscaling group. Each address is
// returned as "host:port" using the devnet RPC port.
func getAddressListFromAWS(cfg *config) ([]string, error) {
	log.Infof("Getting hosts list for autoscaling group %s", cfg.AutoScalingGroup)
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String(cfg.Region)}))
	ec2Client := ec2.New(sess)
	// Restrict to instances that belong to the autoscaling group and are
	// currently running (state code 16).
	instances, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			&ec2.Filter{Name: aws.String("tag:aws:autoscaling:groupName"), Values: []*string{&cfg.AutoScalingGroup}},
			&ec2.Filter{Name: aws.String("instance-state-code"), Values: []*string{aws.String(instanceStateCodeActive)}},
		},
	})
	if err != nil {
		return nil, errors.Wrap(err, "Error describing instances")
	}

	addressList := []string{}
	for _, reservation := range instances.Reservations {
		for _, instance := range reservation.Instances {
			// Instances without a private DNS name are skipped.
			if instance.PrivateDnsName == nil {
				continue
			}
			// NOTE(review): %s assumes dagconfig.DevNetParams.RPCPort is a
			// string — confirm its declared type.
			addressList = append(addressList, fmt.Sprintf("%s:%s", *instance.PrivateDnsName, dagconfig.DevNetParams.RPCPort))
		}
	}

	return addressList, nil
}
|
||||
|
||||
func getAddressListFromPath(cfg *config) ([]string, error) {
|
||||
file, err := os.Open(cfg.AddressListPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
addressList := []string{}
|
||||
for scanner.Scan() {
|
||||
addressList = append(addressList, scanner.Text())
|
||||
}
|
||||
|
||||
return addressList, nil
|
||||
}
|
@ -1,38 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/rpcclient"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// simulatorClient wraps an RPC client with block-added notification plumbing
// for the mining simulator.
type simulatorClient struct {
	*rpcclient.Client
	// onBlockAdded receives one signal per filtered-block-added
	// notification, but only while notifyForNewBlocks is set.
	onBlockAdded       chan struct{}
	notifyForNewBlocks bool
}

// newSimulatorClient connects to the RPC server described by connCfg and
// registers for block notifications. address is used for error reporting
// only.
func newSimulatorClient(address string, connCfg *rpcclient.ConnConfig) (*simulatorClient, error) {
	client := &simulatorClient{
		// Buffered by 1 so the notification callback never blocks the
		// RPC client's dispatch goroutine.
		onBlockAdded: make(chan struct{}, 1),
	}
	notificationHandlers := &rpcclient.NotificationHandlers{
		OnFilteredBlockAdded: func(height uint64, header *wire.BlockHeader,
			txs []*util.Tx) {
			if client.notifyForNewBlocks {
				client.onBlockAdded <- struct{}{}
			}
		},
	}
	var err error
	client.Client, err = rpcclient.New(connCfg, notificationHandlers)
	if err != nil {
		return nil, errors.Errorf("Error connecting to address %s: %s", address, err)
	}

	if err = client.NotifyBlocks(); err != nil {
		return nil, errors.Errorf("Error while registering client %s for block notifications: %s", client.Host(), err)
	}
	return client, nil
}
|
@ -1,66 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/jessevdk/go-flags"
|
||||
)
|
||||
|
||||
const (
	// Default log file names, placed under the simulator's app data dir.
	defaultLogFilename    = "miningsimulator.log"
	defaultErrLogFilename = "miningsimulator_err.log"
)

var (
	// Default configuration options
	defaultHomeDir    = util.AppDataDir("miningsimulator", false)
	defaultLogFile    = filepath.Join(defaultHomeDir, defaultLogFilename)
	defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
)

// config holds the simulator's command-line options. Addresses come either
// from an AWS autoscaling group (AutoScalingGroup + Region) or from a local
// file (AddressListPath) — the two sources are mutually exclusive.
type config struct {
	AutoScalingGroup string `long:"autoscaling" description:"AWS AutoScalingGroup to use for address list"`
	Region           string `long:"region" description:"AWS region to use for address list"`
	AddressListPath  string `long:"addresslist" description:"Path to a list of nodes' JSON-RPC endpoints"`
	CertificatePath  string `long:"cert" description:"Path to certificate accepted by JSON-RPC endpoint"`
	DisableTLS       bool   `long:"notls" description:"Disable TLS"`
	Verbose          bool   `long:"verbose" short:"v" description:"Enable logging of RPC requests"`
	BlockDelay       uint64 `long:"block-delay" description:"Delay for block submission (in milliseconds)"`
}
|
||||
|
||||
func parseConfig() (*config, error) {
|
||||
cfg := &config{}
|
||||
parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag)
|
||||
_, err := parser.Parse()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.CertificatePath == "" && !cfg.DisableTLS {
|
||||
return nil, errors.New("--notls has to be disabled if --cert is used")
|
||||
}
|
||||
|
||||
if cfg.CertificatePath != "" && cfg.DisableTLS {
|
||||
return nil, errors.New("--cert should be omitted if --notls is used")
|
||||
}
|
||||
|
||||
if (cfg.AutoScalingGroup == "" || cfg.Region == "") && cfg.AddressListPath == "" {
|
||||
return nil, errors.New("Either (--autoscaling and --region) or --addresslist must be specified")
|
||||
}
|
||||
|
||||
if (cfg.AutoScalingGroup != "" || cfg.Region != "") && cfg.AddressListPath != "" {
|
||||
return nil, errors.New("Both (--autoscaling and --region) and --addresslist can't be specified at the same time")
|
||||
}
|
||||
|
||||
if cfg.AutoScalingGroup != "" && cfg.Region == "" {
|
||||
return nil, errors.New("If --autoscaling is specified --region must be specified as well")
|
||||
}
|
||||
|
||||
initLog(defaultLogFile, defaultErrLogFile)
|
||||
|
||||
return cfg, nil
|
||||
}
|
@ -1,149 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/rpcclient"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// connectionManager tracks the set of simulator clients, one per node
// address, and optionally keeps the address list fresh from AWS.
type connectionManager struct {
	addressList []string
	cert        []byte
	clients     []*simulatorClient
	cfg         *config
	// disconnectChan stops refreshAddressesLoop; nil when autoscaling
	// discovery is not in use.
	disconnectChan chan struct{}
}

// newConnectionManager resolves the address list, loads the TLS certificate
// (if TLS is enabled), connects to every node, and — when addresses come
// from an AWS autoscaling group — starts a background refresh loop.
func newConnectionManager(cfg *config) (*connectionManager, error) {
	connManager := &connectionManager{
		cfg: cfg,
	}
	var err error

	connManager.addressList, err = getAddressList(cfg)
	if err != nil {
		return nil, err
	}

	connManager.cert, err = readCert(cfg)
	if err != nil {
		return nil, err
	}

	connManager.clients, err = connectToServers(connManager.addressList, connManager.cert)
	if err != nil {
		return nil, err
	}

	// Only AWS-discovered address lists can change over time, so the
	// refresh loop is started only in that mode.
	if cfg.AutoScalingGroup != "" {
		connManager.disconnectChan = make(chan struct{})
		spawn(func() { connManager.refreshAddressesLoop() })
	}

	return connManager, nil
}
|
||||
|
||||
func connectToServer(address string, cert []byte) (*simulatorClient, error) {
|
||||
connCfg := &rpcclient.ConnConfig{
|
||||
Host: address,
|
||||
Endpoint: "ws",
|
||||
User: "user",
|
||||
Pass: "pass",
|
||||
DisableTLS: cert == nil,
|
||||
RequestTimeout: time.Second * 10,
|
||||
Certificates: cert,
|
||||
}
|
||||
|
||||
client, err := newSimulatorClient(address, connCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Infof("Connected to server %s", address)
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func connectToServers(addressList []string, cert []byte) ([]*simulatorClient, error) {
|
||||
clients := make([]*simulatorClient, 0, len(addressList))
|
||||
|
||||
for _, address := range addressList {
|
||||
client, err := connectToServer(address, cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clients = append(clients, client)
|
||||
}
|
||||
|
||||
return clients, nil
|
||||
}
|
||||
|
||||
func readCert(cfg *config) ([]byte, error) {
|
||||
if cfg.DisableTLS {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cert, err := ioutil.ReadFile(cfg.CertificatePath)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Error reading certificates file: %s", err)
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// close stops the address-refresh loop (when running) and disconnects every
// managed client.
func (cm *connectionManager) close() {
	// disconnectChan is non-nil only when refreshAddressesLoop was started.
	if cm.disconnectChan != nil {
		cm.disconnectChan <- struct{}{}
	}
	for _, client := range cm.clients {
		client.Disconnect()
	}
}
|
||||
|
||||
// refreshAddressInterval is how often the AWS address list is re-queried.
const refreshAddressInterval = time.Minute * 10

// refreshAddressesLoop periodically refreshes the address list until a
// signal on cm.disconnectChan stops it. A refresh failure panics (the
// spawn wrapper routes the panic through the logger).
func (cm *connectionManager) refreshAddressesLoop() {
	for {
		select {
		case <-time.After(refreshAddressInterval):
			err := cm.refreshAddresses()
			if err != nil {
				panic(err)
			}
		case <-cm.disconnectChan:
			return
		}
	}
}
|
||||
|
||||
func (cm *connectionManager) refreshAddresses() error {
|
||||
newAddressList, err := getAddressList(cm.cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(newAddressList) == len(cm.addressList) {
|
||||
return nil
|
||||
}
|
||||
|
||||
outerLoop:
|
||||
for _, newAddress := range newAddressList {
|
||||
for _, oldAddress := range cm.addressList {
|
||||
if newAddress == oldAddress {
|
||||
continue outerLoop
|
||||
}
|
||||
}
|
||||
|
||||
client, err := connectToServer(newAddress, cm.cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cm.clients = append(cm.clients, client)
|
||||
}
|
||||
|
||||
cm.addressList = newAddressList
|
||||
|
||||
return nil
|
||||
}
|
@ -1,28 +0,0 @@
|
||||
# -- multistage docker build: stage #1: build stage
FROM golang:1.13-alpine AS build

RUN mkdir -p /go/src/github.com/kaspanet/kaspad

WORKDIR /go/src/github.com/kaspanet/kaspad

RUN apk add --no-cache curl git

# Copy module files first so the dependency download layer is cached
# independently of source changes.
COPY go.mod .
COPY go.sum .

RUN go mod download

COPY . .

# Build a statically linked simulator binary (CGO disabled).
RUN cd mining/simulator && CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o mining_simulator .

# --- multistage docker build: stage #2: runtime image
FROM alpine
WORKDIR /app

# tini runs as PID 1 to forward signals and reap zombies.
RUN apk add --no-cache tini

COPY --from=build /go/src/github.com/kaspanet/kaspad/mining/simulator/mining_simulator /app/

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/app/mining_simulator"]
|
@ -1,10 +0,0 @@
|
||||
1. To build the docker image, invoke the following command from the kaspad root directory:
|
||||
docker build -t mining_simulator -f ./mining/simulator/docker/Dockerfile .
|
||||
|
||||
2. To run:
|
||||
    a. create a folder ~/.btcd/mining_simulator containing the following files:
|
||||
rpc.cert - certificate file that all rpc nodes accept
|
||||
addresses - list of node addresses in the format [hostname]:[port]. One node per line
|
||||
b. run:
|
||||
docker run -v ~/.btcd:/root/.btcd -t mining_simulator
|
||||
|
@ -1,34 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/kaspanet/kaspad/logs"
|
||||
"github.com/kaspanet/kaspad/rpcclient"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
	// backendLog is the shared logging backend for the simulator.
	backendLog = logs.NewBackend()
	// log is the simulator's subsystem logger.
	log = backendLog.Logger("MNSM")
	// spawn starts goroutines whose panics are reported through log.
	spawn = panics.GoroutineWrapperFunc(log)
)
|
||||
|
||||
func initLog(logFile, errLogFile string) {
|
||||
err := backendLog.AddLogFile(logFile, logs.LevelTrace)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logs.LevelTrace, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = backendLog.AddLogFile(errLogFile, logs.LevelWarn)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logs.LevelWarn, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func enableRPCLogging() {
|
||||
rpclog := backendLog.Logger("RPCC")
|
||||
rpclog.SetLevel(logs.LevelTrace)
|
||||
rpcclient.UseLogger(rpclog)
|
||||
}
|
@ -1,46 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/signal"
|
||||
"github.com/kaspanet/kaspad/util/panics"
|
||||
)
|
||||
|
||||
func main() {
|
||||
defer panics.HandlePanic(log, nil, nil)
|
||||
cfg, err := parseConfig()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if cfg.Verbose {
|
||||
enableRPCLogging()
|
||||
}
|
||||
|
||||
connManager, err := newConnectionManager(cfg)
|
||||
if err != nil {
|
||||
panic(errors.Errorf("Error initializing connection manager: %s", err))
|
||||
}
|
||||
defer connManager.close()
|
||||
|
||||
spawn(func() {
|
||||
err = mineLoop(connManager, cfg.BlockDelay)
|
||||
if err != nil {
|
||||
panic(errors.Errorf("Error in main loop: %s", err))
|
||||
}
|
||||
})
|
||||
|
||||
interrupt := signal.InterruptListener()
|
||||
<-interrupt
|
||||
}
|
||||
|
||||
func disconnect(clients []*simulatorClient) {
|
||||
for _, client := range clients {
|
||||
client.Disconnect()
|
||||
}
|
||||
}
|
@ -1,207 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
nativeerrors "errors"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kaspanet/kaspad/rpcclient"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/blockdag"
|
||||
"github.com/kaspanet/kaspad/rpcmodel"
|
||||
"github.com/kaspanet/kaspad/util"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// random is the simulator's PRNG, seeded once at startup.
var random = rand.New(rand.NewSource(time.Now().UnixNano()))

// parseBlock converts a GetBlockTemplate RPC result into a util.Block
// ready for solving: it decodes parent hashes, difficulty bits, the
// accepted-ID merkle root, the UTXO commitment, and every transaction
// (coinbase first), then recomputes the hash merkle root.
func parseBlock(template *rpcmodel.GetBlockTemplateResult) (*util.Block, error) {
	// parse parent hashes
	parentHashes := make([]*daghash.Hash, len(template.ParentHashes))
	for i, parentHash := range template.ParentHashes {
		hash, err := daghash.NewHashFromStr(parentHash)
		if err != nil {
			return nil, errors.Errorf("Error decoding hash %s: %s", parentHash, err)
		}
		parentHashes[i] = hash
	}

	// parse Bits (hex-encoded compact difficulty)
	bitsInt64, err := strconv.ParseInt(template.Bits, 16, 32)
	if err != nil {
		return nil, errors.Errorf("Error decoding bits %s: %s", template.Bits, err)
	}
	bits := uint32(bitsInt64)

	// parseAcceptedIDMerkleRoot
	acceptedIDMerkleRoot, err := daghash.NewHashFromStr(template.AcceptedIDMerkleRoot)
	if err != nil {
		return nil, errors.Errorf("Error parsing acceptedIDMerkleRoot: %s", err)
	}
	utxoCommitment, err := daghash.NewHashFromStr(template.UTXOCommitment)
	if err != nil {
		return nil, errors.Errorf("Error parsing utxoCommitment: %s", err)
	}
	// parse rest of block; hash merkle root and nonce are filled in later.
	msgBlock := wire.NewMsgBlock(
		wire.NewBlockHeader(template.Version, parentHashes, &daghash.Hash{},
			acceptedIDMerkleRoot, utxoCommitment, bits, 0))

	// Coinbase comes first, followed by the template's transactions; each
	// is hex-decoded straight into a wire.MsgTx.
	for i, txResult := range append([]rpcmodel.GetBlockTemplateResultTx{*template.CoinbaseTxn}, template.Transactions...) {
		reader := hex.NewDecoder(strings.NewReader(txResult.Data))
		tx := &wire.MsgTx{}
		if err := tx.KaspaDecode(reader, 0); err != nil {
			return nil, errors.Errorf("Error decoding tx #%d: %s", i, err)
		}
		msgBlock.AddTransaction(tx)
	}

	// Recompute the hash merkle root now that all transactions are present.
	block := util.NewBlock(msgBlock)
	msgBlock.Header.HashMerkleRoot = blockdag.BuildHashMerkleTreeStore(block.Transactions()).Root()
	return block, nil
}
|
||||
|
||||
func solveBlock(block *util.Block, stopChan chan struct{}, foundBlock chan *util.Block) {
|
||||
msgBlock := block.MsgBlock()
|
||||
targetDifficulty := util.CompactToBig(msgBlock.Header.Bits)
|
||||
initialNonce := random.Uint64()
|
||||
for i := random.Uint64(); i != initialNonce-1; i++ {
|
||||
select {
|
||||
case <-stopChan:
|
||||
return
|
||||
default:
|
||||
msgBlock.Header.Nonce = i
|
||||
hash := msgBlock.BlockHash()
|
||||
if daghash.HashToBig(hash).Cmp(targetDifficulty) <= 0 {
|
||||
foundBlock <- block
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// getBlockTemplate requests a block template (including the coinbase
// transaction) from client, using longPollID for long polling.
func getBlockTemplate(client *simulatorClient, longPollID string) (*rpcmodel.GetBlockTemplateResult, error) {
	return client.GetBlockTemplate([]string{"coinbasetxn"}, longPollID)
}
|
||||
|
||||
func templatesLoop(client *simulatorClient, newTemplateChan chan *rpcmodel.GetBlockTemplateResult, errChan chan error, stopChan chan struct{}) {
|
||||
longPollID := ""
|
||||
getBlockTemplateLongPoll := func() {
|
||||
if longPollID != "" {
|
||||
log.Infof("Requesting template with longPollID '%s' from %s", longPollID, client.Host())
|
||||
} else {
|
||||
log.Infof("Requesting template without longPollID from %s", client.Host())
|
||||
}
|
||||
template, err := getBlockTemplate(client, longPollID)
|
||||
if nativeerrors.Is(err, rpcclient.ErrResponseTimedOut) {
|
||||
log.Infof("Got timeout while requesting template '%s' from %s", longPollID, client.Host())
|
||||
return
|
||||
} else if err != nil {
|
||||
errChan <- errors.Errorf("Error getting block template: %s", err)
|
||||
return
|
||||
}
|
||||
if template.LongPollID != longPollID {
|
||||
log.Infof("Got new long poll template: %s", template.LongPollID)
|
||||
longPollID = template.LongPollID
|
||||
newTemplateChan <- template
|
||||
}
|
||||
}
|
||||
getBlockTemplateLongPoll()
|
||||
for {
|
||||
select {
|
||||
case <-stopChan:
|
||||
close(newTemplateChan)
|
||||
return
|
||||
case <-client.onBlockAdded:
|
||||
getBlockTemplateLongPoll()
|
||||
case <-time.Tick(500 * time.Millisecond):
|
||||
getBlockTemplateLongPoll()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// solveLoop consumes templates from newTemplateChan; for each new template
// it cancels the goroutine solving the previous (now stale) template and
// starts a new solver. Parse failures are reported on errChan.
func solveLoop(newTemplateChan chan *rpcmodel.GetBlockTemplateResult, foundBlock chan *util.Block, errChan chan error) {
	var stopOldTemplateSolving chan struct{}
	for template := range newTemplateChan {
		// Stop the solver working on the previous template.
		if stopOldTemplateSolving != nil {
			close(stopOldTemplateSolving)
		}
		stopOldTemplateSolving = make(chan struct{})
		block, err := parseBlock(template)
		if err != nil {
			errChan <- errors.Errorf("Error parsing block: %s", err)
			return
		}

		go solveBlock(block, stopOldTemplateSolving, foundBlock)
	}
	// newTemplateChan was closed: stop the last solver before returning.
	if stopOldTemplateSolving != nil {
		close(stopOldTemplateSolving)
	}
}
|
||||
|
||||
// mineNextBlock starts one mining round against client: a goroutine that
// fetches templates and a goroutine that solves them. Solved blocks are
// delivered on foundBlock.
func mineNextBlock(client *simulatorClient, foundBlock chan *util.Block, templateStopChan chan struct{}, errChan chan error) {
	newTemplateChan := make(chan *rpcmodel.GetBlockTemplateResult)
	go templatesLoop(client, newTemplateChan, errChan, templateStopChan)
	go solveLoop(newTemplateChan, foundBlock, errChan)
}
|
||||
|
||||
func handleFoundBlock(client *simulatorClient, block *util.Block) error {
|
||||
log.Infof("Found block %s with parents %s! Submitting to %s", block.Hash(), block.MsgBlock().Header.ParentHashes, client.Host())
|
||||
|
||||
err := client.SubmitBlock(block, &rpcmodel.SubmitBlockOptions{})
|
||||
if err != nil {
|
||||
return errors.Errorf("Error submitting block %s to %s: %s", block.Hash(), client.Host(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getRandomClient(clients []*simulatorClient) *simulatorClient {
|
||||
clientsCount := int64(len(clients))
|
||||
if clientsCount == 1 {
|
||||
return clients[0]
|
||||
}
|
||||
return clients[random.Int63n(clientsCount)]
|
||||
}
|
||||
|
||||
// mineLoop is the simulator's main loop: it repeatedly picks a random
// client, mines one block through it, and submits the solved block (after
// an optional blockDelay in milliseconds). It returns the first error sent
// on errChan; a nil value received there signals a clean shutdown (the
// foundBlock channel was closed by the template loop).
func mineLoop(connManager *connectionManager, blockDelay uint64) error {
	errChan := make(chan error)

	templateStopChan := make(chan struct{})

	spawn(func() {
		for {
			foundBlock := make(chan *util.Block)
			currentClient := getRandomClient(connManager.clients)
			// Only the client mining the current round forwards
			// block-added notifications.
			currentClient.notifyForNewBlocks = true
			log.Infof("Next block will be mined by: %s", currentClient.Host())
			mineNextBlock(currentClient, foundBlock, templateStopChan, errChan)
			block, ok := <-foundBlock
			if !ok {
				// foundBlock closed — report a clean exit to the outer
				// receive below.
				errChan <- nil
				return
			}
			currentClient.notifyForNewBlocks = false
			// Stop the current round's templates loop before starting
			// the next round.
			templateStopChan <- struct{}{}
			// Submit asynchronously so the next round can begin; the
			// optional delay simulates submission latency.
			spawn(func() {
				if blockDelay != 0 {
					time.Sleep(time.Duration(blockDelay) * time.Millisecond)
				}
				err := handleFoundBlock(currentClient, block)
				if err != nil {
					errChan <- err
				}
			})
		}
	})

	err := <-errChan

	return err
}
|
Loading…
x
Reference in New Issue
Block a user