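// This tool (referred to here as "archiveexport", after the test-consensus tag
// used below) reads blocks from a local kaspad data directory and re-uploads
// them to a node as archival blocks: it asks the node for its pruning-window
// roots, walks each root's past down to the corresponding pruning point, and
// streams the visited blocks, along with their acceptance data, to the node
// via the AddArchivalBlocks RPC.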
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/kaspanet/kaspad/app/appmessage"
	"github.com/kaspanet/kaspad/domain/consensus"
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
	"github.com/kaspanet/kaspad/infrastructure/config"
	"github.com/kaspanet/kaspad/infrastructure/db/database"
	"github.com/kaspanet/kaspad/infrastructure/network/rpcclient"
	"github.com/kaspanet/kaspad/util/profiling"
	"github.com/kaspanet/kaspad/version"
	"github.com/pkg/errors"
)

func main() {
	// defer panics.HandlePanic(log, "MAIN", nil)

	cfg, err := parseConfig()
	if err != nil {
		printErrorAndExit(errors.Errorf("Error parsing command-line arguments: %s", err))
	}
	defer backendLog.Close()

	// Show version at startup.
	log.Infof("Version %s", version.Version())

	// Enable http profiling server if requested.
	if cfg.Profile != "" {
		profiling.Start(cfg.Profile, log)
	}

	err = mainImpl(cfg)
	if err != nil {
		printErrorAndExit(err)
	}
}

func mainImpl(cfg *configFlags) error {
	appDir := config.CleanAndExpandPath(cfg.AppDir)
	appDir = filepath.Join(appDir, cfg.NetParams().Name)
	dataDir := filepath.Join(appDir, "datadir2")

	consensusConfig := &consensus.Config{Params: *cfg.NetParams()}
	factory := consensus.NewFactory()
	factory.SetTestDataDir(dataDir)
	factory.AutoSetActivePrefix(true)
	tc, tearDownFunc, err := factory.NewTestConsensus(consensusConfig, "archiveexport")
	if err != nil {
		return err
	}
	defer tearDownFunc(true)

	rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer)
	if err != nil {
		return err
	}
	rpcClient, err := rpcclient.NewRPCClient(rpcAddress)
	if err != nil {
		return err
	}

	rootsResp, err := rpcClient.GetPruningWindowRoots()
	if err != nil {
		return err
	}

	ppHeaders, err := tc.PruningPointHeaders()
	if err != nil {
		return err
	}

	for _, root := range rootsResp.Roots {
		log.Infof("Got root %s", root.PPRoots)
	}

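	// For each pruning-window root reported by the node, walk that root's past
	// and stream the recovered blocks to the node in fixed-size chunks, logging
	// the send rate roughly every 10 seconds.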
	counterStart := time.Now()
	counter := 0
	for _, root := range rootsResp.Roots {
		ppRoots, err := externalapi.NewDomainHashesFromStrings(root.PPRoots)
		if err != nil {
			return err
		}

		log.Infof("Adding past of %s", ppRoots)

		// Skip roots whose corresponding pruning point header is not available locally.
		if int(root.PPIndex-1) >= len(ppHeaders) {
			continue
		}

		nextPP := ppHeaders[root.PPIndex-1]

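		// blockToChild maps each block queued for traversal to the child through
		// which it was reached; that child is later recorded in the block's
		// ArchivalBlock.Child field.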
		blockToChild := make(map[externalapi.DomainHash]externalapi.DomainHash)

		// TODO: Since GD data is not always available, we should extract the blue work
		// from the header and use that for topological traversal.
		heap := tc.DAGTraversalManager().NewDownHeap(model.NewStagingArea())
		for _, ppRoot := range ppRoots {
			heap.Push(ppRoot)
		}

		visited := make(map[externalapi.DomainHash]struct{})
		chunk := make([]*appmessage.ArchivalBlock, 0, 1000)
		for heap.Len() > 0 {
			hash := heap.Pop()

			if _, ok := visited[*hash]; ok {
				continue
			}
			visited[*hash] = struct{}{}

			// TODO: Use header data instead of GD data
			blockGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), model.NewStagingArea(), hash, false)
			if err != nil {
				return err
			}

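			// The down-heap pops blocks in descending blue-work order, so the
			// traversal for this root stops once it reaches a block whose blue work
			// does not exceed that of the pruning point at index PPIndex-1.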
			if blockGHOSTDAGData.BlueWork().Cmp(nextPP.BlueWork()) <= 0 {
				break
			}

			block, err := tc.BlockStore().Block(tc.DatabaseContext(), model.NewStagingArea(), hash)
			if database.IsNotFoundError(err) {
				continue
			}

			if err != nil {
				return err
			}

			archivalBlock := &appmessage.ArchivalBlock{
				Block: appmessage.DomainBlockToRPCBlock(block),
			}
			if child, ok := blockToChild[*hash]; ok {
				archivalBlock.Child = child.String()
			}

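			// Attach acceptance data (which transactions each mergeset block accepted)
			// when it is available locally and the block's selected parent is not the
			// virtual genesis.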
			acceptanceData, err := tc.AcceptanceDataStore().Get(tc.DatabaseContext(), model.NewStagingArea(), hash)
			isNotFoundErr := database.IsNotFoundError(err)
			if !isNotFoundErr && err != nil {
				return err
			}

			if blockGHOSTDAGData.SelectedParent() != model.VirtualGenesisBlockHash && !isNotFoundErr && len(acceptanceData) > 0 {
				acceptanceDataRPC := make([]*appmessage.MergesetBlockAcceptanceData, 0, len(acceptanceData))
				for _, data := range acceptanceData {
					acceptedTxs := make([]*appmessage.AcceptedTxEntry, 0, len(data.TransactionAcceptanceData))
					for i, tx := range data.TransactionAcceptanceData {
						if !tx.IsAccepted {
							continue
						}

						acceptedTxs = append(acceptedTxs, &appmessage.AcceptedTxEntry{
							TransactionID:    consensushashing.TransactionID(tx.Transaction).String(),
							IndexWithinBlock: uint32(i),
						})
					}

					acceptanceDataRPC = append(acceptanceDataRPC, &appmessage.MergesetBlockAcceptanceData{
						BlockHash:   data.BlockHash.String(),
						AcceptedTxs: acceptedTxs,
					})
				}

				archivalBlock.AcceptanceData = acceptanceDataRPC
				archivalBlock.SelectedParent = blockGHOSTDAGData.SelectedParent().String()
			}

			chunk = append(chunk, archivalBlock)

			if len(chunk) == 1 {
				log.Infof("Added %s to chunk", consensushashing.BlockHash(block))
			}

			if len(chunk) == cap(chunk) {
				err := sendChunk(rpcClient, chunk)
				if err != nil {
					return err
				}
				counter += len(chunk)
				counterDuration := time.Since(counterStart)
				if counterDuration > 10*time.Second {
					rate := float64(counter) / counterDuration.Seconds()
					log.Infof("Sent %d blocks in the last %.2f seconds (%.2f blocks/second)", counter, counterDuration.Seconds(), rate)
					counterStart = time.Now()
					counter = 0
				}

				chunk = chunk[:0]
			}

			for _, parent := range block.Header.DirectParents() {
				heap.Push(parent)
				blockToChild[*parent] = *hash
			}
		}

		// Flush whatever is left in the chunk for this root.
		if len(chunk) > 0 {
			err := sendChunk(rpcClient, chunk)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

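// sendChunk uploads a batch of archival blocks to the node via the
// AddArchivalBlocks RPC and then, as a sanity check, fetches the first block
// of the batch back from the node and verifies it comes with transactions.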
func sendChunk(rpcClient *rpcclient.RPCClient, chunk []*appmessage.ArchivalBlock) error {
	log.Infof("Sending chunk")
	_, err := rpcClient.AddArchivalBlocks(chunk)
	if err != nil {
		return err
	}
	log.Infof("Sent chunk")

	// Check the existence of the first block on the server for sanity.
	block := chunk[0]
	domainBlock, err := appmessage.RPCBlockToDomainBlock(block.Block)
	if err != nil {
		return err
	}

	blockHash := consensushashing.BlockHash(domainBlock)
	log.Infof("Checking block %s", blockHash)
	resp, err := rpcClient.GetBlock(blockHash.String(), true)
	if err != nil {
		return err
	}

	if len(resp.Block.Transactions) == 0 {
		return errors.Errorf("Block %s has no transactions on the server", blockHash)
	}

	return nil
}

func printErrorAndExit(err error) {
	fmt.Fprintf(os.Stderr, "%+v\n", err)
	os.Exit(1)
}