Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-06-06 14:16:43 +00:00)
[NOD-443] Fix API server unorphaning (#501)
* [NOD-443] Immediately request missing parents in API server sync
* [NOD-443] Add rpc client log to api server
* [NOD-443] Fix wrong ordering of pendingHeaders queue
* [NOD-443] Fix error comparison at TestNewHashFromStr
* [NOD-443] Make a separate handleMissingParentHeaders function
* [NOD-443] Change log level
* [NOD-443] Put handleMissingParentHeaders next to handleBlockHeader
* [NOD-443] Make handleBlockAddedMsg function
* [NOD-443] Make reusable missingParentsHashesStr string
* [NOD-443] Remove redundant 's'
* [NOD-443] Refactor to first get all blocks and then add them to database
* [NOD-443] Rename variables and functions, and remove redundant logs
* [NOD-443] Make fetchBlockAndMissingAncestors use block hash as an argument
* [NOD-443] Add log only for first orphan block
* [NOD-443] Fix wrong order of adding blocks to pendingBlocks
* [NOD-443] Write logs for all orphans
* [NOD-443] Log only missing parents that are not already fetched
* [NOD-443] Rename rawVerboseBlockTuple -> rawVerboseBlock
* [NOD-443] Make fetchBlock return *rawVerboseBlock
* [NOD-443] Rename rawVerboseBlock -> rawAndVerboseBlock
This commit is contained in:
parent 8f0d98ef9b
commit 3ab861227d
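For orientation before the diff: the core of this change is that, when a blockAdded notification arrives, the API server now immediately fetches any parents it has never stored and inserts them ahead of the new block, instead of parking the message in a queue and re-requesting missing parents on a timeout. Below is a minimal, runnable sketch of that traversal. The toyBlock type, the toyNode map, the known set, and the hash strings are hypothetical stand-ins for illustration only; the real code in the diff works against the RPC client and the API server database.

package main

import "fmt"

// toyBlock stands in for the node's block data; ParentHashes mirrors the
// verbose-block field the sync code inspects.
type toyBlock struct {
	Hash         string
	ParentHashes []string
}

// toyNode stands in for the RPC client: any block can be fetched by hash.
var toyNode = map[string]*toyBlock{
	"genesis": {Hash: "genesis"},
	"a":       {Hash: "a", ParentHashes: []string{"genesis"}},
	"b":       {Hash: "b", ParentHashes: []string{"a"}},
	"c":       {Hash: "c", ParentHashes: []string{"a", "b"}},
}

// fetchBlockAndMissingAncestors mimics the traversal the commit introduces:
// starting from blockHash, it keeps pulling blocks whose parents are neither
// stored locally nor already scheduled, prepending the fetched parents (and
// then the current block) back onto the pending queue, so parents always end
// up before their children in the returned slice.
func fetchBlockAndMissingAncestors(known map[string]bool, blockHash string) []*toyBlock {
	pending := []*toyBlock{toyNode[blockHash]}
	var toAdd []*toyBlock
	toAddSet := make(map[string]bool)
	for len(pending) > 0 {
		current := pending[0]
		pending = pending[1:]

		// Collect parents that are not stored locally and not already queued for adding.
		var missing []*toyBlock
		for _, parent := range current.ParentHashes {
			if known[parent] || toAddSet[parent] {
				continue
			}
			missing = append(missing, toyNode[parent])
		}
		if len(missing) == 0 {
			toAddSet[current.Hash] = true
			toAdd = append(toAdd, current)
			continue
		}
		// Re-queue: missing parents first, then the current block, then the rest.
		pending = append(append(missing, current), pending...)
	}
	return toAdd
}

func main() {
	known := map[string]bool{"genesis": true} // only the genesis block is stored locally
	for _, block := range fetchBlockAndMissingAncestors(known, "c") {
		fmt.Println("add block", block.Hash) // prints a, b, c: parents before children
	}
}

Prepending the fetched parents to the front of the queue, rather than appending them, is what keeps the returned slice in parents-before-children order without a separate topological sort.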
apiserver/jsonrpc/log.go (new file, 16 lines added)
@@ -0,0 +1,16 @@
+package jsonrpc
+
+import (
+	"github.com/daglabs/btcd/apiserver/logger"
+	"github.com/daglabs/btcd/rpcclient"
+	"github.com/daglabs/btcd/util/panics"
+)
+
+var (
+	log   = logger.BackendLog.Logger("RPCC")
+	spawn = panics.GoroutineWrapperFunc(log, logger.BackendLog)
+)
+
+func init() {
+	rpcclient.UseLogger(log, logger.BackendLog)
+}
@@ -24,24 +24,8 @@ import (
 	"github.com/pkg/errors"
 )
 
-var (
-	// pendingBlockAddedMsgs holds blockAddedMsgs in order of arrival
-	pendingBlockAddedMsgs []*jsonrpc.BlockAddedMsg
-
-	// pendingChainChangedMsgs holds chainChangedMsgs in order of arrival
-	pendingChainChangedMsgs []*jsonrpc.ChainChangedMsg
-
-	// missingBlocks is a map between missing block hashes and the time
-	// they were first found to be missing. If a block is still missing
-	// after blockMissingTimeout then it gets re-requested from the node.
-	missingBlocks = make(map[string]time.Time)
-)
-
-const (
-	// blockMissingTimeout is the amount of time after which a missing block
-	// gets re-requested from the node.
-	blockMissingTimeout = time.Second * 10
-)
+// pendingChainChangedMsgs holds chainChangedMsgs in order of arrival
+var pendingChainChangedMsgs []*jsonrpc.ChainChangedMsg
 
 // startSync keeps the node and the API server in sync. On start, it downloads
 // all data that's missing from the API server, and once it's done it keeps
@@ -85,8 +69,7 @@ func sync(client *jsonrpc.Client, doneChan chan struct{}) error {
 	for {
 		select {
 		case blockAdded := <-client.OnBlockAdded:
-			enqueueBlockAddedMsg(blockAdded)
-			err := processBlockAddedMsgs(client)
+			err := handleBlockAddedMsg(client, blockAdded)
 			if err != nil {
 				return err
 			}
@@ -201,23 +184,27 @@ func findHashOfBluestBlock(mustBeChainBlock bool) (*string, error) {
 // fetchBlock downloads the serialized block and raw block data of
 // the block with hash blockHash.
 func fetchBlock(client *jsonrpc.Client, blockHash *daghash.Hash) (
-	rawBlock string, verboseBlock *btcjson.GetBlockVerboseResult, err error) {
+	*rawAndVerboseBlock, error) {
+	log.Debugf("Getting block %s from the RPC server", blockHash)
 	msgBlock, err := client.GetBlock(blockHash, nil)
 	if err != nil {
-		return "", nil, err
+		return nil, err
 	}
 	writer := bytes.NewBuffer(make([]byte, 0, msgBlock.SerializeSize()))
 	err = msgBlock.Serialize(writer)
 	if err != nil {
-		return "", nil, err
+		return nil, err
 	}
-	rawBlock = hex.EncodeToString(writer.Bytes())
+	rawBlock := hex.EncodeToString(writer.Bytes())
 
-	verboseBlock, err = client.GetBlockVerboseTx(blockHash, nil)
+	verboseBlock, err := client.GetBlockVerboseTx(blockHash, nil)
 	if err != nil {
-		return "", nil, err
+		return nil, err
 	}
-	return rawBlock, verboseBlock, nil
+	return &rawAndVerboseBlock{
+		rawBlock:     rawBlock,
+		verboseBlock: verboseBlock,
+	}, nil
 }
 
 // addBlocks inserts data in the given rawBlocks and verboseBlocks pairwise
@@ -319,12 +306,6 @@ func addBlock(client *jsonrpc.Client, rawBlock string, verboseBlock btcjson.GetB
 	}
 
 	dbTx.Commit()
-
-	// If the block was previously missing, remove it from
-	// the missing blocks collection.
-	if _, ok := missingBlocks[verboseBlock.Hash]; ok {
-		delete(missingBlocks, verboseBlock.Hash)
-	}
 	return nil
 }
 
@@ -937,126 +918,106 @@ func updateAddedChainBlocks(dbTx *gorm.DB, addedBlock *btcjson.ChainBlock) error
 	return nil
 }
 
-// enqueueBlockAddedMsg enqueues onBlockAdded messages to be handled later
-func enqueueBlockAddedMsg(blockAdded *jsonrpc.BlockAddedMsg) {
-	pendingBlockAddedMsgs = append(pendingBlockAddedMsgs, blockAdded)
-}
-
-// processBlockAddedMsgs processes all pending onBlockAdded messages.
-// Messages that cannot yet be processed are re-enqueued.
-func processBlockAddedMsgs(client *jsonrpc.Client) error {
-	var unprocessedBlockAddedMsgs []*jsonrpc.BlockAddedMsg
-	for _, blockAdded := range pendingBlockAddedMsgs {
-		missingHashes, err := missingParentHashes(blockAdded)
-		if err != nil {
-			return errors.Errorf("Could not resolve missing parents: %s", err)
-		}
-		for _, missingHash := range missingHashes {
-			err := handleMissingParent(client, missingHash)
-			if err != nil {
-				log.Errorf("Failed to handle missing parent block %s: %s",
-					missingHash, err)
-			}
-		}
-		if len(missingHashes) > 0 {
-			unprocessedBlockAddedMsgs = append(unprocessedBlockAddedMsgs, blockAdded)
-			continue
-		}
-
-		handleBlockAddedMsg(client, blockAdded)
-	}
-	pendingBlockAddedMsgs = unprocessedBlockAddedMsgs
-	return nil
-}
-
-func handleBlockAddedMsg(client *jsonrpc.Client, blockAdded *jsonrpc.BlockAddedMsg) {
-	hash := blockAdded.Header.BlockHash()
-	log.Debugf("Getting block %s from the RPC server", hash)
-	rawBlock, verboseBlock, err := fetchBlock(client, hash)
-	if err != nil {
-		log.Warnf("Could not fetch block %s: %s", hash, err)
-		return
-	}
-	err = addBlock(client, rawBlock, *verboseBlock)
-	if err != nil {
-		log.Errorf("Could not insert block %s: %s", hash, err)
-		return
-	}
-	log.Infof("Added block %s", hash)
-}
-
-func missingParentHashes(blockAdded *jsonrpc.BlockAddedMsg) ([]string, error) {
-	db, err := database.DB()
-	if err != nil {
-		return nil, err
-	}
-
-	// Collect all referenced parent hashes
-	hashesIn := make([]string, 0, len(blockAdded.Header.ParentHashes))
-	for _, hash := range blockAdded.Header.ParentHashes {
-		hashesIn = append(hashesIn, hash.String())
-	}
-
-	// Make sure that all the parent hashes exist in the database
-	var dbParentBlocks []dbmodels.Block
-	dbResult := db.
-		Model(&dbmodels.Block{}).
-		Where("block_hash in (?)", hashesIn).
-		Find(&dbParentBlocks)
-	dbErrors := dbResult.GetErrors()
-	if httpserverutils.HasDBError(dbErrors) {
-		return nil, httpserverutils.NewErrorFromDBErrors("failed to find parent blocks: ", dbErrors)
-	}
-	if len(hashesIn) != len(dbParentBlocks) {
-		// Some parent hashes are missing. Collect and return them
-		var missingParentHashes []string
-	outerLoop:
-		for _, hash := range hashesIn {
-			for _, dbParentBlock := range dbParentBlocks {
-				if dbParentBlock.BlockHash == hash {
-					continue outerLoop
-				}
-			}
-			missingParentHashes = append(missingParentHashes, hash)
-		}
-		return missingParentHashes, nil
-	}
-
-	return nil, nil
-}
-
-// handleMissingParent handles missing parent block hashes as follows:
-// a. If it's the first time we've encountered this block hash, add it to
-// the missing blocks collection with time = now
-// b. Otherwise, if time + blockMissingTimeout < now (that is to say,
-// blockMissingTimeout had elapsed) then request the block from the
-// node
-func handleMissingParent(client *jsonrpc.Client, missingParentHash string) error {
-	firstMissingTime, ok := missingBlocks[missingParentHash]
-	if !ok {
-		log.Infof("Parent block %s is missing", missingParentHash)
-		missingBlocks[missingParentHash] = time.Now()
-		return nil
-	}
-	if firstMissingTime.Add(blockMissingTimeout).Before(time.Now()) {
-		hash, err := daghash.NewHashFromStr(missingParentHash)
-		if err != nil {
-			return errors.Errorf("Could not create hash: %s", err)
-		}
-		rawBlock, verboseBlock, err := fetchBlock(client, hash)
-		if err != nil {
-			return errors.Errorf("Could not fetch block %s: %s", hash, err)
-		}
-		err = addBlock(client, rawBlock, *verboseBlock)
-		if err != nil {
-			return errors.Errorf("Could not insert block %s: %s", hash, err)
-		}
-		log.Infof("Parent block %s was fetched after missing for over %s",
-			missingParentHash, blockMissingTimeout)
-	}
-	return nil
-}
-
+type rawAndVerboseBlock struct {
+	rawBlock     string
+	verboseBlock *btcjson.GetBlockVerboseResult
+}
+
+func (r *rawAndVerboseBlock) String() string {
+	return r.verboseBlock.Hash
+}
+
+func handleBlockAddedMsg(client *jsonrpc.Client, blockAdded *jsonrpc.BlockAddedMsg) error {
+	blocks, err := fetchBlockAndMissingAncestors(client, blockAdded.Header.BlockHash())
+	if err != nil {
+		return err
+	}
+	for _, block := range blocks {
+		err = addBlock(client, block.rawBlock, *block.verboseBlock)
+		if err != nil {
+			return err
+		}
+		log.Infof("Added block %s", block.verboseBlock.Hash)
+	}
+	return nil
+}
+
+func fetchBlockAndMissingAncestors(client *jsonrpc.Client, blockHash *daghash.Hash) ([]*rawAndVerboseBlock, error) {
+	block, err := fetchBlock(client, blockHash)
+	if err != nil {
+		return nil, err
+	}
+	pendingBlocks := []*rawAndVerboseBlock{block}
+	blocksToAdd := make([]*rawAndVerboseBlock, 0)
+	blocksToAddSet := make(map[string]struct{})
+	for len(pendingBlocks) > 0 {
+		var currentBlock *rawAndVerboseBlock
+		currentBlock, pendingBlocks = pendingBlocks[0], pendingBlocks[1:]
+		missingHashes, err := missingParentHashes(currentBlock.verboseBlock.ParentHashes)
+		if err != nil {
+			return nil, err
+		}
+		blocksToPrependToPending := make([]*rawAndVerboseBlock, 0, len(missingHashes))
+		for _, missingHash := range missingHashes {
+			if _, ok := blocksToAddSet[missingHash]; ok {
+				continue
+			}
+			hash, err := daghash.NewHashFromStr(missingHash)
+			if err != nil {
+				return nil, err
+			}
+			block, err := fetchBlock(client, hash)
+			if err != nil {
+				return nil, err
+			}
+			blocksToPrependToPending = append(blocksToPrependToPending, block)
+		}
+		if len(blocksToPrependToPending) == 0 {
+			blocksToAddSet[currentBlock.verboseBlock.Hash] = struct{}{}
+			blocksToAdd = append(blocksToAdd, currentBlock)
+			continue
+		}
+		log.Debugf("Found %s missing parents for block %s and fetched them", blocksToPrependToPending, currentBlock)
+		blocksToPrependToPending = append(blocksToPrependToPending, currentBlock)
+		pendingBlocks = append(blocksToPrependToPending, pendingBlocks...)
+	}
+	return blocksToAdd, nil
+}
+
+func missingParentHashes(parentHashes []string) ([]string, error) {
+	db, err := database.DB()
+	if err != nil {
+		return nil, err
+	}
+
+	// Make sure that all the parent hashes exist in the database
+	var dbParentBlocks []dbmodels.Block
+	dbResult := db.
+		Model(&dbmodels.Block{}).
+		Where("block_hash in (?)", parentHashes).
+		Find(&dbParentBlocks)
+	dbErrors := dbResult.GetErrors()
+	if httpserverutils.HasDBError(dbErrors) {
+		return nil, httpserverutils.NewErrorFromDBErrors("failed to find parent blocks: ", dbErrors)
+	}
+	if len(parentHashes) != len(dbParentBlocks) {
+		// Some parent hashes are missing. Collect and return them
+		var missingHashes []string
+	outerLoop:
+		for _, hash := range parentHashes {
+			for _, dbParentBlock := range dbParentBlocks {
+				if dbParentBlock.BlockHash == hash {
+					continue outerLoop
+				}
+			}
+			missingHashes = append(missingHashes, hash)
+		}
+		return missingHashes, nil
+	}
+
+	return nil, nil
+}
+
 // enqueueChainChangedMsg enqueues onChainChanged messages to be handled later
 func enqueueChainChangedMsg(chainChanged *jsonrpc.ChainChangedMsg) {
 	pendingChainChangedMsgs = append(pendingChainChangedMsgs, chainChanged)
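One small detail from the hunk above: rawAndVerboseBlock gets a String() method that returns the block hash. That makes the type satisfy fmt.Stringer, which is presumably why the %s verbs in the log.Debugf call above can print block hashes instead of struct dumps, both for a single block and for a slice of them. A standalone illustration with a hypothetical stand-in type (not the API server's real one):

package main

import "fmt"

// block is a hypothetical stand-in; giving it a String method makes it
// satisfy fmt.Stringer, so the %s verb prints the hash rather than the
// whole struct.
type block struct {
	hash string
	raw  string
}

func (b *block) String() string {
	return b.hash
}

func main() {
	b := &block{hash: "000000a1b2c3", raw: "f00dbabe"}
	fmt.Printf("block %s\n", b)                // prints: block 000000a1b2c3
	fmt.Printf("parents %s\n", []*block{b, b}) // prints: parents [000000a1b2c3 000000a1b2c3]
}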
@@ -1,9 +1,5 @@
 package testutil
 
-import (
-	"fmt"
-)
-
 // AreErrorsEqual returns whether errors have the same type
 // and same error string from .Error().
 func AreErrorsEqual(err1, err2 error) bool {
@@ -13,8 +9,5 @@ func AreErrorsEqual(err1, err2 error) bool {
 	if err1 == nil && err2 != nil {
 		return false
 	}
-	if fmt.Sprintf("%T", err1) != fmt.Sprintf("%T", err2) {
-		return false
-	}
 	return err1.Error() == err2.Error()
 }
@@ -191,7 +191,7 @@ func Decode(dst *Hash, src string) error {
 	var reversedHash Hash
 	_, err := hex.Decode(reversedHash[HashSize-hex.DecodedLen(len(srcBytes)):], srcBytes)
 	if err != nil {
-		return err
+		return errors.WithStack(err)
 	}
 
 	// Reverse copy from the temporary hash to destination. Because the
@@ -7,6 +7,7 @@ package daghash
 import (
 	"bytes"
 	"encoding/hex"
+	"github.com/daglabs/btcd/testutil"
 	"math/big"
 	"reflect"
 	"testing"
@@ -214,7 +215,7 @@ func TestNewHashFromStr(t *testing.T) {
 	t.Logf("Running %d tests", len(tests))
 	for i, test := range tests {
 		result, err := NewHashFromStr(test.in)
-		if err != test.err {
+		if !testutil.AreErrorsEqual(err, test.err) {
 			t.Errorf(unexpectedErrStr, i, err, test.err)
 			continue
 		} else if err != nil {