Compare commits

3 Commits

Author SHA1 Message Date
Mike Zak
811a2a9717 Add changelog for v0.8.10 2021-02-25 14:53:09 +02:00
Svarog
127fd065c5 Convert ProtocolError to value-type, so that it can be used with errors.As + fix SubmitBlock ProtocolError condition (#1555)
* Fix condition from || to &&

* Convert ProtocolError to value-type, so that it can be used with errors.As

* Simplify condition further
2021-02-24 17:10:45 +02:00
Mike Zak
2a147fb46d Update to version 0.8.10 2021-02-24 13:24:22 +02:00
170 changed files with 1757 additions and 3365 deletions
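The ProtocolError commit above relies on how Go's errors.As unwraps and matches by type. A minimal sketch of the idea, using a simplified stand-in for kaspad's ProtocolError (the field names are illustrative, not the real definition):

package main

import (
	"errors"
	"fmt"
)

// ProtocolError is a simplified value-type error. The value receiver on
// Error() is what lets errors.As match it even after wrapping.
type ProtocolError struct {
	ShouldBan bool
	Reason    string
}

func (e ProtocolError) Error() string { return e.Reason }

func main() {
	// wrap the error the way protocol handlers typically do
	err := fmt.Errorf("handshake failed: %w", ProtocolError{ShouldBan: true, Reason: "bad version"})

	var protocolError ProtocolError
	if errors.As(err, &protocolError) { // matches the value type through the wrap
		fmt.Println("should ban peer:", protocolError.ShouldBan)
	}
}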

View File

@@ -7,17 +7,19 @@ import (
"runtime"
"time"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/infrastructure/db/database/ldb"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/infrastructure/os/execenv"
"github.com/kaspanet/kaspad/infrastructure/os/limits"
"github.com/kaspanet/kaspad/infrastructure/os/signal"
"github.com/kaspanet/kaspad/infrastructure/os/winservice"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/util/profiling"
"github.com/kaspanet/kaspad/version"
"github.com/kaspanet/kaspad/util/panics"
"github.com/kaspanet/kaspad/infrastructure/config"
"github.com/kaspanet/kaspad/infrastructure/os/execenv"
"github.com/kaspanet/kaspad/infrastructure/os/limits"
"github.com/kaspanet/kaspad/infrastructure/os/winservice"
)
const leveldbCacheSizeMiB = 256
@@ -49,7 +51,6 @@ func StartApp() error {
fmt.Fprintln(os.Stderr, err)
return err
}
defer logger.BackendLog.Close()
defer panics.HandlePanic(log, "MAIN", nil)
app := &kaspadApp{cfg: cfg}

View File

@@ -164,7 +164,11 @@ func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint {
func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) {
inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs))
for i, input := range rpcTransaction.Inputs {
transactionID, err := transactionid.FromString(input.PreviousOutpoint.TransactionID)
transactionIDBytes, err := hex.DecodeString(input.PreviousOutpoint.TransactionID)
if err != nil {
return nil, err
}
transactionID, err := transactionid.FromBytes(transactionIDBytes)
if err != nil {
return nil, err
}
@@ -194,11 +198,19 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
}
}
subnetworkID, err := subnetworks.FromString(rpcTransaction.SubnetworkID)
subnetworkIDBytes, err := hex.DecodeString(rpcTransaction.SubnetworkID)
if err != nil {
return nil, err
}
payloadHash, err := externalapi.NewDomainHashFromString(rpcTransaction.PayloadHash)
subnetworkID, err := subnetworks.FromBytes(subnetworkIDBytes)
if err != nil {
return nil, err
}
payloadHashBytes, err := hex.DecodeString(rpcTransaction.PayloadHash)
if err != nil {
return nil, err
}
payloadHash, err := externalapi.NewDomainHashFromByteSlice(payloadHashBytes)
if err != nil {
return nil, err
}
@@ -243,7 +255,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version},
}
}
subnetworkID := transaction.SubnetworkID.String()
subnetworkID := hex.EncodeToString(transaction.SubnetworkID[:])
payloadHash := transaction.PayloadHash.String()
payload := hex.EncodeToString(transaction.Payload)
return &RPCTransaction{
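The hunk above replaces one-call string parsers (transactionid.FromString, subnetworks.FromString, NewDomainHashFromString) with an explicit two-step decode. A self-contained sketch of that pattern, with a hypothetical 32-byte ID type standing in for the domain types:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

const idSize = 32 // assumption for illustration

type domainID [idSize]byte

// idFromHexString mirrors the two-step pattern: hex-decode the RPC string,
// then validate the length and copy into the fixed-size domain type.
func idFromHexString(s string) (*domainID, error) {
	raw, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(raw) != idSize {
		return nil, errors.New("unexpected ID length")
	}
	var id domainID
	copy(id[:], raw)
	return &id, nil
}

func main() {
	id, err := idFromHexString("00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff")
	if err != nil {
		panic(err)
	}
	fmt.Printf("first bytes: %x\n", id[:4])
}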

View File

@@ -55,7 +55,6 @@ type BlockVerboseData struct {
Bits string
Difficulty float64
ParentHashes []string
ChildrenHashes []string
SelectedParentHash string
BlueScore uint64
IsHeaderOnly bool
@@ -105,5 +104,4 @@ type ScriptPubKeyResult struct {
Hex string
Type string
Address string
Version uint16
}

View File

@@ -27,7 +27,6 @@ type GetBlockDAGInfoResponseMessage struct {
VirtualParentHashes []string
Difficulty float64
PastMedianTime int64
PruningPointHash string
Error *RPCError
}

View File

@@ -7,6 +7,8 @@ package app
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("KASD")
var log, _ = logger.Get(logger.SubsystemTags.KASD)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -2,6 +2,8 @@ package flowcontext
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -17,7 +17,7 @@ type RequestIBDBlocksContext interface {
Domain() domain.Domain
}
type handleRequestHeadersFlow struct {
type handleRequestBlocksFlow struct {
RequestIBDBlocksContext
incomingRoute, outgoingRoute *router.Route
peer *peer.Peer
@@ -27,7 +27,7 @@ type handleRequestHeadersFlow struct {
func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router.Route,
outgoingRoute *router.Route, peer *peer.Peer) error {
flow := &handleRequestHeadersFlow{
flow := &handleRequestBlocksFlow{
RequestIBDBlocksContext: context,
incomingRoute: incomingRoute,
outgoingRoute: outgoingRoute,
@@ -36,7 +36,7 @@ func HandleRequestHeaders(context RequestIBDBlocksContext, incomingRoute *router
return flow.start()
}
func (flow *handleRequestHeadersFlow) start() error {
func (flow *handleRequestBlocksFlow) start() error {
for {
lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute)
if err != nil {

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -129,10 +129,6 @@ type fakeRelayInvsContext struct {
rwLock sync.RWMutex
}
func (f *fakeRelayInvsContext) GetBlockChildren(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
}
func (f *fakeRelayInvsContext) OnPruningPointUTXOSetOverride() error {
return nil
}

View File

@@ -191,7 +191,7 @@ func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransact
continue
}
return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr)
return protocolerrors.Errorf(true, "rejected transaction %s", txID)
}
err = flow.broadcastAcceptedTransactions([]*externalapi.DomainTransactionID{txID})
if err != nil {

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -2,6 +2,8 @@ package peer
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("PROT")
var log, _ = logger.Get(logger.SubsystemTags.PROT)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -59,20 +59,8 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute,
sendVersionRoute, router.OutgoingRoute())
if err != nil {
// non-blocking read from channel
select {
case innerError := <-errChan:
if errors.Is(err, routerpkg.ErrRouteClosed) {
m.handleError(innerError, netConnection, router.OutgoingRoute())
} else {
log.Errorf("Peer %s sent invalid message: %s", netConnection, innerError)
m.handleError(err, netConnection, router.OutgoingRoute())
}
default:
m.handleError(err, netConnection, router.OutgoingRoute())
}
m.handleError(err, netConnection, router.OutgoingRoute())
return
}
defer m.context.RemoveFromPeers(peer)
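The select/default block removed in this hunk is Go's idiom for a non-blocking channel receive. A short fragment of its shape (handle and errChan are placeholder names, not kaspad's actual identifiers):

select {
case innerErr := <-errChan:
	handle(innerErr) // a queued error takes precedence
default:
	handle(err) // nothing buffered: fall back to the original error
}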

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -2,6 +2,8 @@ package rpccontext
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -3,14 +3,13 @@ package rpccontext
import (
"encoding/hex"
"fmt"
"math"
"math/big"
"strconv"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/difficulty"
"github.com/pkg/errors"
"math"
"math/big"
"strconv"
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
@@ -48,11 +47,6 @@ func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, b
"invalid block")
}
childrenHashes, err := ctx.Domain.Consensus().GetBlockChildren(hash)
if err != nil {
return nil, err
}
result := &appmessage.BlockVerboseData{
Hash: hash.String(),
Version: blockHeader.Version(),
@@ -61,7 +55,6 @@ func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, b
AcceptedIDMerkleRoot: blockHeader.AcceptedIDMerkleRoot().String(),
UTXOCommitment: blockHeader.UTXOCommitment().String(),
ParentHashes: hashes.ToStrings(blockHeader.ParentHashes()),
ChildrenHashes: hashes.ToStrings(childrenHashes),
Nonce: blockHeader.Nonce(),
Time: blockHeader.TimeInMilliseconds(),
Bits: strconv.FormatInt(int64(blockHeader.Bits()), 16),
@@ -212,7 +205,6 @@ func (ctx *Context) buildTransactionVerboseOutputs(tx *externalapi.DomainTransac
output.Index = uint32(i)
output.Value = transactionOutput.Value
output.ScriptPubKey = &appmessage.ScriptPubKeyResult{
Version: transactionOutput.ScriptPublicKey.Version,
Address: encodedAddr,
Hex: hex.EncodeToString(transactionOutput.ScriptPublicKey.Script),
Type: scriptClass.String(),

View File

@@ -36,11 +36,5 @@ func HandleGetBlockDAGInfo(context *rpccontext.Context, _ *router.Router, _ appm
response.Difficulty = context.GetDifficultyRatio(virtualInfo.Bits, context.Config.ActiveNetParams)
response.PastMedianTime = virtualInfo.PastMedianTime
pruningPoint, err := context.Domain.Consensus().PruningPoint()
if err != nil {
return nil, err
}
response.PruningPointHash = pruningPoint.String()
return response, nil
}

View File

@@ -11,7 +11,7 @@ import (
const (
// maxBlocksInGetBlocksResponse is the max amount of blocks that are
// allowed in a GetBlocksResult.
maxBlocksInGetBlocksResponse = 1000
maxBlocksInGetBlocksResponse = 100
)
// HandleGetBlocks handles the respectively named RPC command
@@ -37,17 +37,6 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
Error: appmessage.RPCErrorf("Could not decode lowHash %s: %s", getBlocksRequest.LowHash, err),
}, nil
}
blockInfo, err := context.Domain.Consensus().GetBlockInfo(lowHash)
if err != nil {
return nil, err
}
if !blockInfo.Exists {
return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash),
}, nil
}
}
// Get hashes between lowHash and virtualSelectedParent

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("RPCS")
var log, _ = logger.Get(logger.SubsystemTags.RPCS)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -1,31 +1,11 @@
Kaspad v0.9.0 - 2021-03-04
===========================
* Merge big subdags in pick virtual parents (#1574)
* Write in the reject message the tx rejection reason (#1573)
* Add nil checks for protowire (#1570)
* Increase getBlocks limit to 1000 (#1572)
* Return RPC error if getBlock's lowHash doesn't exist (#1569)
* Add default dns-seeder to testnet (#1568)
* Fix utxoindex deserialization (#1566)
* Add pruning point hash to GetBlockDagInfo response (#1565)
* Use EmitUnpopulated so that kaspactl prints all fields, even the default ones (#1561)
* Stop logging an error whenever an RPC/P2P connection is canceled (#1562)
* Cleanup the logger and make it asynchronous (#1524)
* Close all iterators (#1542)
* Add childrenHashes to GetBlock/s RPC commands (#1560)
* Add ScriptPublicKey.Version to RPC (#1559)
* Fix the target block rate to create less bursty mining (#1554)
Kaspad v0.8.10 - 2021-02-25
===========================
* Fix bug where invalid mempool transactions were not removed (#1551)
* Add RPC reconnection to the miner (#1552)
* Remove virtual diff parents - only selectedTip is virtualDiffParent now (#1550)
* Fix UTXO index (#1548)
* Prevent fast failing (#1545)
* Increase the sleep time in kaspaminer when the node is not synced (#1544)
* Disallow header only blocks on RPC, relay and when requesting IBD full blocks (#1537)
* Make templateManager hold a DomainBlock and isSynced bool instead of a GetBlockTemplateResponseMessage (#1538)
[*] Fix bug where invalid mempool transactions were not removed (#1551)
[*] Add RPC reconnection to the miner (#1552)
[*] Remove virtual diff parents - only selectedTip is virtualDiffParent now (#1550)
[*] Fix UTXO index (#1548)
[*] Prevent fast failing (#1545)
[*] Increase the sleep time in kaspaminer when the node is not synced (#1544)
[*] Disallow header only blocks on RPC, relay and when requesting IBD full blocks (#1537)
[*] Make templateManager hold a DomainBlock and isSynced bool instead of a GetBlockTemplateResponseMessage (#1538)

View File

@@ -2,11 +2,10 @@ package main
import (
"fmt"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
"os"
"time"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
"github.com/pkg/errors"
"google.golang.org/protobuf/encoding/protojson"
@@ -68,7 +67,7 @@ func postCommand(cfg *configFlags, client *grpcclient.GRPCClient, responseChan c
if err != nil {
printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err))
}
responseBytes, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(response)
responseBytes, err := protojson.Marshal(response)
if err != nil {
printErrorAndExit(errors.Wrapf(err, "error parsing the response from the RPC server").Error())
}
@@ -93,7 +92,6 @@ func prettifyResponse(response string) string {
marshalOptions := &protojson.MarshalOptions{}
marshalOptions.Indent = " "
marshalOptions.EmitUnpopulated = true
return marshalOptions.Format(kaspadMessage)
}
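EmitUnpopulated is an option on protojson.MarshalOptions: when set, zero-valued fields are still written to the JSON output instead of being omitted. A one-line fragment showing the two forms this diff switches between (the sample output is illustrative):

// with the option, zero fields appear, e.g. {"p2pId":"","mempoolSize":"0"}
responseBytes, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(response)
// with plain protojson.Marshal(response), the same message may print as {}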

View File

@@ -14,7 +14,6 @@ var (
)
func initLog(logFile, errLogFile string) {
log.SetLevel(logger.LevelDebug)
err := backendLog.AddLogFile(logFile, logger.LevelTrace)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logger.LevelTrace, err)
@@ -25,15 +24,4 @@ func initLog(logFile, errLogFile string) {
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logger.LevelWarn, err)
os.Exit(1)
}
err = backendLog.AddLogWriter(os.Stdout, logger.LevelInfo)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", logger.LevelWarn, err)
os.Exit(1)
}
err = backendLog.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err)
os.Exit(1)
}
}

View File

@@ -26,7 +26,6 @@ func main() {
fmt.Fprintf(os.Stderr, "Error parsing command-line arguments: %s\n", err)
os.Exit(1)
}
defer backendLog.Close()
// Show version at startup.
log.Infof("Version %s", version.Version())

View File

@@ -53,8 +53,6 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond
}
blockInWindowIndex := 0
sleepTime := 0 * time.Second
for {
foundBlockChan <- mineNextBlock(mineWhenNotSynced)
@@ -63,16 +61,15 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond
if blockInWindowIndex == windowSize-1 {
deviation := windowExpectedEndTime.Sub(time.Now())
if deviation > 0 {
sleepTime = deviation / windowSize
log.Infof("Finished to mine %d blocks %s earlier than expected. Setting the miner "+
"to sleep %s between blocks to compensate",
windowSize, deviation, sleepTime)
log.Infof("Finished to mine %d blocks %s earlier than expected. Sleeping %s to compensate",
windowSize, deviation, deviation)
time.Sleep(deviation)
}
blockInWindowIndex = 0
windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
}
time.Sleep(sleepTime)
}
}
})

View File

@@ -157,15 +157,6 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
return blockInfo, nil
}
func (s *consensus) GetBlockChildren(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
blockRelation, err := s.blockRelationStore.BlockRelation(s.databaseContext, blockHash)
if err != nil {
return nil, err
}
return blockRelation.Children, nil
}
func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
s.lock.Lock()
defer s.lock.Unlock()

View File

@@ -3,40 +3,25 @@ package database
import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
)
type dbCursor struct {
cursor database.Cursor
isClosed bool
cursor database.Cursor
}
func (d dbCursor) Next() bool {
if d.isClosed {
panic("Tried using a closed DBCursor")
}
return d.cursor.Next()
}
func (d dbCursor) First() bool {
if d.isClosed {
panic("Tried using a closed DBCursor")
}
return d.cursor.First()
}
func (d dbCursor) Seek(key model.DBKey) error {
if d.isClosed {
return errors.New("Tried using a closed DBCursor")
}
return d.cursor.Seek(dbKeyToDatabaseKey(key))
}
func (d dbCursor) Key() (model.DBKey, error) {
if d.isClosed {
return nil, errors.New("Tried using a closed DBCursor")
}
key, err := d.cursor.Key()
if err != nil {
return nil, err
@@ -46,23 +31,11 @@ func (d dbCursor) Key() (model.DBKey, error) {
}
func (d dbCursor) Value() ([]byte, error) {
if d.isClosed {
return nil, errors.New("Tried using a closed DBCursor")
}
return d.cursor.Value()
}
func (d dbCursor) Close() error {
if d.isClosed {
return errors.New("Tried using a closed DBCursor")
}
d.isClosed = true
err := d.cursor.Close()
if err != nil {
return err
}
d.cursor = nil
return nil
return d.cursor.Close()
}
func newDBCursor(cursor database.Cursor) model.DBCursor {
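This hunk (and several store hunks below) strips an isClosed guard from the cursor wrapper. A condensed, compilable sketch of the guard pattern as it looked, with illustrative type names:

package dbguard

import "errors"

// rawCursor stands in for the underlying database cursor interface.
type rawCursor interface {
	Next() bool
	Close() error
}

type guardedCursor struct {
	cursor   rawCursor
	isClosed bool
}

func (g *guardedCursor) Next() bool {
	if g.isClosed {
		panic("Tried using a closed cursor")
	}
	return g.cursor.Next()
}

func (g *guardedCursor) Close() error {
	if g.isClosed {
		return errors.New("Tried using a closed cursor")
	}
	g.isClosed = true
	err := g.cursor.Close()
	if err != nil {
		return err
	}
	g.cursor = nil // drop the reference so misuse fails fast and memory frees
	return nil
}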

View File

@@ -9,7 +9,6 @@ func utxoCollectionToDBUTXOCollection(utxoCollection externalapi.UTXOCollection)
items := make([]*DbUtxoCollectionItem, utxoCollection.Len())
i := 0
utxoIterator := utxoCollection.Iterator()
defer utxoIterator.Close()
for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() {
outpoint, entry, err := utxoIterator.Get()
if err != nil {

View File

@@ -7,7 +7,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/pkg/errors"
)
var bucket = database.MakeBucket([]byte("blocks"))
@@ -215,28 +214,18 @@ func (bs *blockStore) serializeBlockCount(count uint64) ([]byte, error) {
}
type allBlockHashesIterator struct {
cursor model.DBCursor
isClosed bool
cursor model.DBCursor
}
func (a allBlockHashesIterator) First() bool {
if a.isClosed {
panic("Tried using a closed AllBlockHashesIterator")
}
return a.cursor.First()
}
func (a allBlockHashesIterator) Next() bool {
if a.isClosed {
panic("Tried using a closed AllBlockHashesIterator")
}
return a.cursor.Next()
}
func (a allBlockHashesIterator) Get() (*externalapi.DomainHash, error) {
if a.isClosed {
return nil, errors.New("Tried using a closed AllBlockHashesIterator")
}
key, err := a.cursor.Key()
if err != nil {
return nil, err
@@ -246,19 +235,6 @@ func (a allBlockHashesIterator) Get() (*externalapi.DomainHash, error) {
return externalapi.NewDomainHashFromByteSlice(blockHashBytes)
}
func (a allBlockHashesIterator) Close() error {
if a.isClosed {
return errors.New("Tried using a closed AllBlockHashesIterator")
}
a.isClosed = true
err := a.cursor.Close()
if err != nil {
return err
}
a.cursor = nil
return nil
}
func (bs *blockStore) AllBlockHashesIterator(dbContext model.DBReader) (model.BlockIterator, error) {
cursor, err := dbContext.Cursor(bucket)
if err != nil {

View File

@@ -37,7 +37,6 @@ func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction)
}
toRemoveIterator := css.virtualUTXODiffStaging.ToRemove().Iterator()
defer toRemoveIterator.Close()
for ok := toRemoveIterator.First(); ok; ok = toRemoveIterator.Next() {
toRemoveOutpoint, _, err := toRemoveIterator.Get()
if err != nil {
@@ -57,7 +56,6 @@ func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction)
}
toAddIterator := css.virtualUTXODiffStaging.ToAdd().Iterator()
defer toAddIterator.Close()
for ok := toAddIterator.First(); ok; ok = toAddIterator.Next() {
toAddOutpoint, toAddEntry, err := toAddIterator.Get()
if err != nil {
@@ -158,7 +156,6 @@ func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader,
if err != nil {
return nil, err
}
defer cursor.Close()
if fromOutpoint != nil {
serializedFromOutpoint, err := serializeOutpoint(fromOutpoint)
@@ -173,7 +170,6 @@ func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader,
}
iterator := newCursorUTXOSetIterator(cursor)
defer iterator.Close()
outpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, 0, limit)
for len(outpointAndUTXOEntryPairs) < limit && iterator.Next() {
@@ -204,8 +200,7 @@ func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader)
}
type utxoSetIterator struct {
cursor model.DBCursor
isClosed bool
cursor model.DBCursor
}
func newCursorUTXOSetIterator(cursor model.DBCursor) externalapi.ReadOnlyUTXOSetIterator {
@@ -213,23 +208,14 @@ func newCursorUTXOSetIterator(cursor model.DBCursor) externalapi.ReadOnlyUTXOSet
}
func (u utxoSetIterator) First() bool {
if u.isClosed {
panic("Tried using a closed utxoSetIterator")
}
return u.cursor.First()
}
func (u utxoSetIterator) Next() bool {
if u.isClosed {
panic("Tried using a closed utxoSetIterator")
}
return u.cursor.Next()
}
func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
if u.isClosed {
return nil, nil, errors.New("Tried using a closed utxoSetIterator")
}
key, err := u.cursor.Key()
if err != nil {
panic(err)
@@ -252,16 +238,3 @@ func (u utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry
return outpoint, utxoEntry, nil
}
func (u utxoSetIterator) Close() error {
if u.isClosed {
return errors.New("Tried using a closed utxoSetIterator")
}
u.isClosed = true
err := u.cursor.Close()
if err != nil {
return err
}
u.cursor = nil
return nil
}

View File

@@ -45,7 +45,6 @@ func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbCo
if err != nil {
return err
}
defer deleteCursor.Close()
for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() {
key, err := deleteCursor.Key()
if err != nil {

View File

@@ -6,7 +6,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
var importedPruningPointUTXOsBucket = database.MakeBucket([]byte("imported-pruning-point-utxos"))
@@ -17,7 +16,6 @@ func (ps *pruningStore) ClearImportedPruningPointUTXOs(dbContext model.DBWriter)
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
key, err := cursor.Key()
@@ -62,8 +60,7 @@ func (ps *pruningStore) ImportedPruningPointUTXOIterator(dbContext model.DBReade
}
type utxoSetIterator struct {
cursor model.DBCursor
isClosed bool
cursor model.DBCursor
}
func (ps *pruningStore) newCursorUTXOSetIterator(cursor model.DBCursor) externalapi.ReadOnlyUTXOSetIterator {
@@ -71,23 +68,14 @@ func (ps *pruningStore) newCursorUTXOSetIterator(cursor model.DBCursor) external
}
func (u *utxoSetIterator) First() bool {
if u.isClosed {
panic("Tried using a closed utxoSetIterator")
}
return u.cursor.First()
}
func (u *utxoSetIterator) Next() bool {
if u.isClosed {
panic("Tried using a closed utxoSetIterator")
}
return u.cursor.Next()
}
func (u *utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
if u.isClosed {
return nil, nil, errors.New("Tried using a closed utxoSetIterator")
}
key, err := u.cursor.Key()
if err != nil {
panic(err)
@@ -111,19 +99,6 @@ func (u *utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry
return outpoint, utxoEntry, nil
}
func (u *utxoSetIterator) Close() error {
if u.isClosed {
return errors.New("Tried using a closed utxoSetIterator")
}
u.isClosed = true
err := u.cursor.Close()
if err != nil {
return err
}
u.cursor = nil
return nil
}
func (ps *pruningStore) importedPruningPointUTXOKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
serializedOutpoint, err := serializeOutpoint(outpoint)
if err != nil {
@@ -200,7 +175,6 @@ func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWrit
if err != nil {
return err
}
defer deleteCursor.Close()
for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() {
key, err := deleteCursor.Key()
if err != nil {
@@ -217,7 +191,6 @@ func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWrit
if err != nil {
return err
}
defer insertCursor.Close()
for ok := insertCursor.First(); ok; ok = insertCursor.Next() {
importedPruningPointUTXOSetKey, err := insertCursor.Key()
if err != nil {

View File

@@ -124,7 +124,6 @@ func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter,
if err != nil {
return err
}
defer deleteCursor.Close()
for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() {
key, err := deleteCursor.Key()
if err != nil {
@@ -216,7 +215,6 @@ func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
if err != nil {
return nil, err
}
defer cursor.Close()
if fromOutpoint != nil {
serializedFromOutpoint, err := serializeOutpoint(fromOutpoint)
@@ -231,7 +229,6 @@ func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
}
pruningPointUTXOIterator := ps.newCursorUTXOSetIterator(cursor)
defer pruningPointUTXOIterator.Close()
outpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, 0, limit)
for len(outpointAndUTXOEntryPairs) < limit && pruningPointUTXOIterator.Next() {

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.BDAG)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -7,5 +7,4 @@ type BlockIterator interface {
First() bool
Next() bool
Get() (*externalapi.DomainHash, error)
Close() error
}

View File

@@ -9,7 +9,6 @@ type Consensus interface {
GetBlock(blockHash *DomainHash) (*DomainBlock, error)
GetBlockHeader(blockHash *DomainHash) (BlockHeader, error)
GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error)
GetBlockChildren(blockHash *DomainHash) ([]*DomainHash, error)
GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)
GetHashesBetween(lowHash, highHash *DomainHash, maxBlueScoreDifference uint64) ([]*DomainHash, error)

View File

@@ -6,5 +6,4 @@ type ReadOnlyUTXOSetIterator interface {
First() bool
Next() bool
Get() (outpoint *DomainOutpoint, utxoEntry UTXOEntry, err error)
Close() error
}

View File

@@ -10,6 +10,9 @@ type DomainSubnetworkID [DomainSubnetworkIDSize]byte
// String stringifies a subnetwork ID.
func (id DomainSubnetworkID) String() string {
for i := 0; i < DomainSubnetworkIDSize/2; i++ {
id[i], id[DomainSubnetworkIDSize-1-i] = id[DomainSubnetworkIDSize-1-i], id[i]
}
return hex.EncodeToString(id[:])
}
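The String method above reverses a copy of the ID before hex-encoding because the ID is stored as a little-endian byte array. A toy, self-contained version of the same display convention (the 20-byte size is an assumption for illustration):

package main

import (
	"encoding/hex"
	"fmt"
)

const size = 20 // stands in for DomainSubnetworkIDSize

type subnetworkID [size]byte

func (id subnetworkID) String() string {
	// id is a copy (value receiver), so reversing in place is safe here
	for i, j := 0, size-1; i < j; i, j = i+1, j-1 {
		id[i], id[j] = id[j], id[i]
	}
	return hex.EncodeToString(id[:])
}

func main() {
	var id subnetworkID
	id[0] = 0x01    // set the lowest-order byte
	fmt.Println(id) // the 01 prints last: big-endian display
}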

View File

@@ -11,7 +11,6 @@ type DAGTopologyManager interface {
IsChildOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
IsAncestorOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
IsAncestorOfAny(blockHash *externalapi.DomainHash, potentialDescendants []*externalapi.DomainHash) (bool, error)
IsAnyAncestorOf(potentialAncestors []*externalapi.DomainHash, blockHash *externalapi.DomainHash) (bool, error)
IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error)
ChildInSelectedParentChainOf(context, highHash *externalapi.DomainHash) (*externalapi.DomainHash, error)

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BLVA")
var log, _ = logger.Get(logger.SubsystemTags.BLVL)

View File

@@ -86,7 +86,6 @@ func checkBlockUTXOCommitment(t *testing.T, consensus testapi.TestConsensus, blo
if err != nil {
t.Fatalf("Error restoring past UTXO of block %s: %+v", blockName, err)
}
defer utxoSetIterator.Close()
// Build a Multiset
ms := multiset.New()

View File

@@ -88,7 +88,6 @@ func (csm *consensusStateManager) importPruningPoint(newPruningPoint *externalap
if err != nil {
return err
}
defer importedPruningPointUTXOIterator.Close()
// Clone the pruningPoint block here because validateBlockTransactionsAgainstPastUTXO
// assumes that the block UTXOEntries are pre-filled during further validations
@@ -177,7 +176,6 @@ func (csm *consensusStateManager) importVirtualUTXOSetAndPruningPointUTXOSet() e
if err != nil {
return err
}
defer pruningPointUTXOSetIterator.Close()
log.Debugf("Importing the virtual UTXO set")
err = csm.consensusStateStore.ImportPruningPointUTXOSetIntoVirtualUTXOSet(csm.databaseContext, pruningPointUTXOSetIterator)

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

View File

@@ -34,6 +34,7 @@ func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainH
}
log.Debugf("The selected parent of the virtual is: %s", virtualSelectedParent)
selectedVirtualParents := hashset.NewFromSlice(virtualSelectedParent)
candidates := candidatesHeap.ToSlice()
// prioritize half the blocks with highest blueWork and half with lowest, so the network will merge splits faster.
if len(candidates) >= int(csm.maxBlockParents) {
@@ -45,14 +46,7 @@ func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainH
end--
}
}
// Limit to maxBlockParents*3 candidates, that way we don't go over thousands of tips when the network isn't healthy.
// There's no specific reason for a factor of 3, and it's not a consensus rule, just an estimation saying we probably
// don't want to consider and calculate 3 times the amount of candidates for the set of parents.
if len(candidates) > int(csm.maxBlockParents)*3 {
candidates = candidates[:int(csm.maxBlockParents)*3]
}
selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent}
mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet
for len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) {
@@ -62,68 +56,32 @@ func (csm *consensusStateManager) pickVirtualParents(tips []*externalapi.DomainH
log.Debugf("Attempting to add %s to the virtual parents", candidate)
log.Debugf("The current merge set size is %d", mergeSetSize)
canBeParent, newCandidate, mergeSetIncrease, err := csm.mergeSetIncrease(candidate, selectedVirtualParents, mergeSetSize)
mergeSetIncrease, err := csm.mergeSetIncrease(candidate, selectedVirtualParents)
if err != nil {
return nil, err
}
if canBeParent {
mergeSetSize += mergeSetIncrease
selectedVirtualParents = append(selectedVirtualParents, candidate)
log.Tracef("Added block %s to the virtual parents set", candidate)
log.Debugf("The merge set would increase by %d with block %s", mergeSetIncrease, candidate)
if mergeSetSize+mergeSetIncrease > csm.mergeSetSizeLimit {
log.Debugf("Cannot add block %s since that would violate the merge set size limit", candidate)
continue
}
// If we already have a candidate in the past of newCandidate then skip.
isInFutureOfCandidates, err := csm.dagTopologyManager.IsAnyAncestorOf(candidates, newCandidate)
if err != nil {
return nil, err
}
if isInFutureOfCandidates {
continue
}
// Remove all candidates in the future of newCandidate
candidates, err = csm.removeHashesInFutureOf(candidates, newCandidate)
if err != nil {
return nil, err
}
candidates = append(candidates, newCandidate)
log.Debugf("Block %s increases merge set too much, instead adding its ancestor %s", candidate, newCandidate)
selectedVirtualParents.Add(candidate)
mergeSetSize += mergeSetIncrease
log.Tracef("Added block %s to the virtual parents set", candidate)
}
boundedMergeBreakingParents, err := csm.boundedMergeBreakingParents(selectedVirtualParents)
boundedMergeBreakingParents, err := csm.boundedMergeBreakingParents(selectedVirtualParents.ToSlice())
if err != nil {
return nil, err
}
log.Tracef("The following parents are omitted for "+
"breaking the bounded merge set: %s", boundedMergeBreakingParents)
// Remove all boundedMergeBreakingParents from selectedVirtualParents
for _, breakingParent := range boundedMergeBreakingParents {
for i, parent := range selectedVirtualParents {
if parent.Equal(breakingParent) {
selectedVirtualParents[i] = selectedVirtualParents[len(selectedVirtualParents)-1]
selectedVirtualParents = selectedVirtualParents[:len(selectedVirtualParents)-1]
break
}
}
}
log.Debugf("The virtual parents resolved to be: %s", selectedVirtualParents)
return selectedVirtualParents, nil
}
func (csm *consensusStateManager) removeHashesInFutureOf(hashes []*externalapi.DomainHash, ancestor *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
// Source: https://github.com/golang/go/wiki/SliceTricks#filter-in-place
i := 0
for _, hash := range hashes {
isInFutureOfAncestor, err := csm.dagTopologyManager.IsAncestorOf(ancestor, hash)
if err != nil {
return nil, err
}
if !isInFutureOfAncestor {
hashes[i] = hash
i++
}
}
return hashes[:i], nil
virtualParents := selectedVirtualParents.Subtract(boundedMergeBreakingParents).ToSlice()
log.Tracef("The virtual parents resolved to be: %s", virtualParents)
return virtualParents, nil
}
func (csm *consensusStateManager) selectVirtualSelectedParent(
@@ -195,66 +153,59 @@ func (csm *consensusStateManager) selectVirtualSelectedParent(
}
}
// mergeSetIncrease returns different things depending on the result:
// If the candidate can be a virtual parent then canBeParent=true and mergeSetIncrease=The increase in merge set size
// If the candidate can't be a virtual parent, then canBeParent=false and newCandidate is a new proposed candidate in the past of candidate.
func (csm *consensusStateManager) mergeSetIncrease(candidate *externalapi.DomainHash, selectedVirtualParents []*externalapi.DomainHash, mergeSetSize uint64,
) (canBeParent bool, newCandidate *externalapi.DomainHash, mergeSetIncrease uint64, err error) {
func (csm *consensusStateManager) mergeSetIncrease(
candidate *externalapi.DomainHash, selectedVirtualParents hashset.HashSet) (uint64, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "mergeSetIncrease")
defer onEnd()
visited := hashset.New()
// Start with the candidate's parents in the queue as we already know the candidate isn't an ancestor of the selectedVirtualParents.
parents, err := csm.dagTopologyManager.Parents(candidate)
queue := csm.dagTraversalManager.NewDownHeap()
err := queue.Push(candidate)
if err != nil {
return false, nil, 0, err
return 0, err
}
for _, parent := range parents {
visited.Add(parent)
}
queue := parents
mergeSetIncrease = uint64(1) // starts with 1 for the candidate itself
mergeSetIncrease := uint64(1) // starts with 1 for the candidate itself
var current *externalapi.DomainHash
for len(queue) > 0 {
current, queue = queue[0], queue[1:]
for queue.Len() > 0 {
current := queue.Pop()
log.Tracef("Attempting to increment the merge set size increase for block %s", current)
isInPastOfSelectedVirtualParents, err := csm.dagTopologyManager.IsAncestorOfAny(current, selectedVirtualParents)
isInPastOfSelectedVirtualParents, err := csm.dagTopologyManager.IsAncestorOfAny(
current, selectedVirtualParents.ToSlice())
if err != nil {
return false, nil, 0, err
return 0, err
}
if isInPastOfSelectedVirtualParents {
log.Tracef("Skipping block %s because it's in the past of one (or more) of the selected virtual parents", current)
log.Tracef("Skipping block %s because it's in the past of one "+
"(or more) of the selected virtual parents", current)
continue
}
log.Tracef("Incrementing the merge set size increase")
mergeSetIncrease++
if (mergeSetSize + mergeSetIncrease) > csm.mergeSetSizeLimit {
log.Debugf("The merge set would increase by more than the limit with block %s", candidate)
return false, current, mergeSetIncrease, nil
}
parents, err := csm.dagTopologyManager.Parents(current)
if err != nil {
return false, nil, 0, err
return 0, err
}
for _, parent := range parents {
if !visited.Contains(parent) {
visited.Add(parent)
queue = append(queue, parent)
err = queue.Push(parent)
if err != nil {
return 0, err
}
}
}
}
log.Debugf("The resolved merge set size increase is: %d", mergeSetIncrease)
return true, nil, mergeSetIncrease, nil
return mergeSetIncrease, nil
}
func (csm *consensusStateManager) boundedMergeBreakingParents(
parents []*externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
parents []*externalapi.DomainHash) (hashset.HashSet, error) {
onEnd := logger.LogAndMeasureExecutionTime(log, "boundedMergeBreakingParents")
defer onEnd()
@@ -320,7 +271,7 @@ func (csm *consensusStateManager) boundedMergeBreakingParents(
}
}
var boundedMergeBreakingParents []*externalapi.DomainHash
boundedMergeBreakingParents := hashset.New()
for _, parent := range parents {
log.Debugf("Checking whether parent %s breaks the bounded merge set", parent)
isBadRedInPast := false
@@ -336,7 +287,7 @@ func (csm *consensusStateManager) boundedMergeBreakingParents(
}
if isBadRedInPast {
log.Debugf("Adding parent %s to the bounded merge breaking parents set", parent)
boundedMergeBreakingParents = append(boundedMergeBreakingParents, parent)
boundedMergeBreakingParents.Add(parent)
}
}
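Both versions of mergeSetIncrease above share one idea: breadth-first-search the candidate's past and count every block not already covered by the selected parents. A simplified, self-contained sketch of that counting walk (toy types, not kaspad's actual structures):

package main

import "fmt"

type block struct {
	name    string
	parents []*block
}

// mergeSetIncrease counts the candidate plus every ancestor of it that the
// alreadyMerged predicate does not cover, mirroring the BFS in the hunk above.
func mergeSetIncrease(candidate *block, alreadyMerged func(*block) bool) uint64 {
	visited := map[*block]bool{candidate: true}
	queue := []*block{candidate}
	increase := uint64(0)
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		if current != candidate && alreadyMerged(current) {
			continue // in the past of a selected parent: contributes nothing new
		}
		increase++
		for _, parent := range current.parents {
			if !visited[parent] {
				visited[parent] = true
				queue = append(queue, parent)
			}
		}
	}
	return increase
}

func main() {
	genesis := &block{name: "genesis"}
	a := &block{name: "a", parents: []*block{genesis}}
	b := &block{name: "b", parents: []*block{a}}
	merged := map[string]bool{"genesis": true, "a": true} // already merged via the selected parent
	fmt.Println(mergeSetIncrease(b, func(bl *block) bool { return merged[bl.name] })) // prints 1
}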

View File

@@ -87,22 +87,6 @@ func (dtm *dagTopologyManager) IsAncestorOfAny(blockHash *externalapi.DomainHash
return false, nil
}
// IsAnyAncestorOf returns true if at least one of `potentialAncestors` is an ancestor of `blockHash`
func (dtm *dagTopologyManager) IsAnyAncestorOf(potentialAncestors []*externalapi.DomainHash, blockHash *externalapi.DomainHash) (bool, error) {
for _, potentialAncestor := range potentialAncestors {
isAncestorOf, err := dtm.IsAncestorOf(potentialAncestor, blockHash)
if err != nil {
return false, err
}
if isAncestorOf {
return true, nil
}
}
return false, nil
}
// IsInSelectedParentChainOf returns true if blockHashA is in the selected parent chain of blockHashB
func (dtm *dagTopologyManager) IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) {
return dtm.reachabilityManager.IsReachabilityTreeAncestorOf(blockHashA, blockHashB)

View File

@@ -14,21 +14,14 @@ type selectedChildIterator struct {
highHash, lowHash *externalapi.DomainHash
current *externalapi.DomainHash
err error
isClosed bool
}
func (s *selectedChildIterator) First() bool {
if s.isClosed {
panic("Tried using a closed SelectedChildIterator")
}
s.current = s.lowHash
return s.Next()
}
func (s *selectedChildIterator) Next() bool {
if s.isClosed {
panic("Tried using a closed SelectedChildIterator")
}
if s.err != nil {
return true
}
@@ -57,27 +50,9 @@ func (s *selectedChildIterator) Next() bool {
}
func (s *selectedChildIterator) Get() (*externalapi.DomainHash, error) {
if s.isClosed {
return nil, errors.New("Tried using a closed SelectedChildIterator")
}
return s.current, s.err
}
func (s *selectedChildIterator) Close() error {
if s.isClosed {
return errors.New("Tried using a closed SelectedChildIterator")
}
s.isClosed = true
s.databaseContext = nil
s.dagTopologyManager = nil
s.reachabilityDataStore = nil
s.highHash = nil
s.lowHash = nil
s.current = nil
s.err = nil
return nil
}
// SelectedChildIterator returns a BlockIterator that iterates from lowHash (exclusive) to highHash (inclusive) over
// highHash's selected parent chain
func (dtm *dagTraversalManager) SelectedChildIterator(highHash, lowHash *externalapi.DomainHash) (model.BlockIterator, error) {

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BDAG")
var log, _ = logger.Get(logger.SubsystemTags.BDAG)

View File

@@ -390,9 +390,6 @@ func (dt *DAGTopologyManagerImpl) IsAncestorOf(hashBlockA *externalapi.DomainHas
func (dt *DAGTopologyManagerImpl) IsAncestorOfAny(blockHash *externalapi.DomainHash, potentialDescendants []*externalapi.DomainHash) (bool, error) {
panic("unimplemented")
}
func (dt *DAGTopologyManagerImpl) IsAnyAncestorOf([]*externalapi.DomainHash, *externalapi.DomainHash) (bool, error) {
panic("unimplemented")
}
func (dt *DAGTopologyManagerImpl) IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) {
panic("unimplemented")
}

View File

@@ -2,4 +2,4 @@ package pruningmanager
import "github.com/kaspanet/kaspad/infrastructure/logger"
var log = logger.RegisterSubSystem("PRNM")
var log, _ = logger.Get(logger.SubsystemTags.PRNM)

View File

@@ -133,7 +133,6 @@ func (pm *pruningManager) UpdatePruningPointByVirtual() error {
if err != nil {
return err
}
defer iterator.Close()
// Finding the next pruning point candidate: look for the latest
// selected child of the current candidate that is in depth of at
@@ -426,7 +425,6 @@ func (pm *pruningManager) validateUTXOSetFitsCommitment(pruningPointHash *extern
if err != nil {
return err
}
defer utxoSetIterator.Close()
utxoSetMultiset := multiset.New()
for ok := utxoSetIterator.First(); ok; ok = utxoSetIterator.Next() {
@@ -546,7 +544,6 @@ func (pm *pruningManager) updatePruningPointUTXOSet() error {
if err != nil {
return err
}
defer utxoSetIterator.Close()
log.Debugf("Updating the pruning point UTXO set")
err = pm.pruningStore.UpdatePruningPointUTXOSet(pm.databaseContext, utxoSetIterator)
@@ -566,7 +563,6 @@ func (pm *pruningManager) PruneAllBlocksBelow(pruningPointHash *externalapi.Doma
if err != nil {
return err
}
defer iterator.Close()
for ok := iterator.First(); ok; ok = iterator.Next() {
blockHash, err := iterator.Get()

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("REAC")
var log, _ = logger.Get(logger.SubsystemTags.REAC)

View File

@@ -1,16 +0,0 @@
package reachabilitymanager_test
import (
"os"
"testing"
"github.com/kaspanet/kaspad/infrastructure/logger"
)
const logLevel = logger.LevelWarn
func TestMain(m *testing.M) {
logger.SetLogLevels(logLevel)
logger.InitLogStdout(logLevel)
os.Exit(m.Run())
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/pkg/errors"
"math"
"math/rand"
@@ -15,10 +16,14 @@ import (
)
// Test configuration
const numBlocksExponent = 12
const (
numBlocksExponent = 12
logLevel = "warn"
)
func initializeTest(t *testing.T, testName string) (tc testapi.TestConsensus, teardown func(keepDataDir bool)) {
t.Parallel()
logger.SetLogLevels(logLevel)
params := dagconfig.SimnetParams
params.SkipProofOfWork = true
tc, teardown, err := consensus.NewFactory().NewTestConsensus(&params, false, testName)

View File

@@ -59,7 +59,6 @@ func (sm *syncManager) antiPastHashesBetween(lowHash, highHash *externalapi.Doma
if err != nil {
return nil, err
}
defer iterator.Close()
for ok := iterator.First(); ok; ok = iterator.Next() {
current, err := iterator.Get()
if err != nil {
@@ -146,7 +145,6 @@ func (sm *syncManager) findHighHashAccordingToMaxBlueScoreDifference(lowHash *ex
if err != nil {
return nil, err
}
defer iterator.Close()
for ok := iterator.First(); ok; ok = iterator.Next() {
highHashCandidate, err := iterator.Get()
if err != nil {
@@ -193,7 +191,6 @@ func (sm *syncManager) missingBlockBodyHashes(highHash *externalapi.DomainHash)
if err != nil {
return nil, err
}
defer selectedChildIterator.Close()
lowHash := pruningPoint
foundHeaderOnlyBlock := false

View File

@@ -4,4 +4,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("SYNC")
var log, _ = logger.Get(logger.SubsystemTags.SYNC)

View File

@@ -26,7 +26,6 @@ func (tc *testConsensus) convertToDot() (string, error) {
if err != nil {
return "", err
}
defer blocksIterator.Close()
for ok := blocksIterator.First(); ok; ok = blocksIterator.Next() {
hash, err := blocksIterator.Get()

View File

@@ -1,11 +1,23 @@
package subnetworks
import (
"bytes"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
func cmp(a, b externalapi.DomainSubnetworkID) int {
// We compare the hashes backwards because Hash is stored as a little endian byte array.
for i := externalapi.DomainSubnetworkIDSize - 1; i >= 0; i-- {
switch {
case a[i] < b[i]:
return -1
case a[i] > b[i]:
return 1
}
}
return 0
}
// Less returns true iff id a is less than id b
func Less(a, b externalapi.DomainSubnetworkID) bool {
return bytes.Compare(a[:], b[:]) < 0
return cmp(a, b) < 0
}
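A small illustration of why the comparison runs backwards: with little-endian storage, the most significant byte sits at the highest index (a fragment, assuming the surrounding package):

// {0xff} sets only the lowest-order byte (value 0xff);
// {0x00, 0x01} sets a higher-order byte (value 0x0100); the rest are zero.
small := externalapi.DomainSubnetworkID{0xff}
large := externalapi.DomainSubnetworkID{0x00, 0x01}
Less(small, large)                    // true: cmp sees large's byte at index 1
bytes.Compare(small[:], large[:]) < 0 // false: index 0 misleads lexicographic order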

View File

@@ -8,7 +8,11 @@ import (
// FromString creates a DomainSubnetworkID from the given byte slice
func FromString(str string) (*externalapi.DomainSubnetworkID, error) {
subnetworkIDBytes, err := hex.DecodeString(str)
runes := []rune(str)
// Reverse the hex string two characters (one byte) at a time, undoing
// the byte reversal performed by DomainSubnetworkID.String()
for i := 0; i < externalapi.DomainSubnetworkIDSize/2; i++ {
j := externalapi.DomainSubnetworkIDSize - 1 - i
runes[i*2], runes[j*2] = runes[j*2], runes[i*2]
runes[i*2+1], runes[j*2+1] = runes[j*2+1], runes[i*2+1]
}
subnetworkIDBytes, err := hex.DecodeString(string(runes))
if err != nil {
return nil, err
}

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("SCRP")
var log, _ = logger.Get(logger.SubsystemTags.SCRP)

View File

@@ -9,7 +9,6 @@ import (
func TestMain(m *testing.M) {
// set log level to trace, so that logClosures passed to log.Tracef are covered
log.SetLevel(logger.LevelTrace)
logger.InitLogStdout(logger.LevelTrace)
os.Exit(m.Run())
}

View File

@@ -11,9 +11,8 @@ type utxoOutpointEntryPair struct {
}
type utxoCollectionIterator struct {
index int
pairs []utxoOutpointEntryPair
isClosed bool
index int
pairs []utxoOutpointEntryPair
}
func (uc utxoCollection) Iterator() externalapi.ReadOnlyUTXOSetIterator {
@@ -30,33 +29,21 @@ func (uc utxoCollection) Iterator() externalapi.ReadOnlyUTXOSetIterator {
}
func (uci *utxoCollectionIterator) First() bool {
if uci.isClosed {
panic("Tried using a closed utxoCollectionIterator")
}
uci.index = 0
return len(uci.pairs) > 0
}
func (uci *utxoCollectionIterator) Next() bool {
if uci.isClosed {
panic("Tried using a closed utxoCollectionIterator")
}
uci.index++
return uci.index < len(uci.pairs)
}
func (uci *utxoCollectionIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
if uci.isClosed {
return nil, nil, errors.New("Tried using a closed utxoCollectionIterator")
}
pair := uci.pairs[uci.index]
return &pair.outpoint, pair.entry, nil
}
func (uci *utxoCollectionIterator) WithDiff(diff externalapi.UTXODiff) (externalapi.ReadOnlyUTXOSetIterator, error) {
if uci.isClosed {
return nil, errors.New("Tried using a closed utxoCollectionIterator")
}
d, ok := diff.(*immutableUTXODiff)
if !ok {
return nil, errors.New("diff is not of type *immutableUTXODiff")
@@ -68,12 +55,3 @@ func (uci *utxoCollectionIterator) WithDiff(diff externalapi.UTXODiff) (external
toAddIterator: diff.ToAdd().Iterator(),
}, nil
}
func (uci *utxoCollectionIterator) Close() error {
if uci.isClosed {
return errors.New("Tried using a closed utxoCollectionIterator")
}
uci.isClosed = true
uci.pairs = nil
return nil
}

View File

@@ -14,7 +14,6 @@ type readOnlyUTXOIteratorWithDiff struct {
currentErr error
toAddIterator externalapi.ReadOnlyUTXOSetIterator
isClosed bool
}
// IteratorWithDiff applies a UTXODiff to given utxo iterator
@@ -41,17 +40,9 @@ func IteratorWithDiff(iterator externalapi.ReadOnlyUTXOSetIterator, diff externa
}
func (r *readOnlyUTXOIteratorWithDiff) First() bool {
if r.isClosed {
panic("Tried using a closed readOnlyUTXOIteratorWithDiff")
}
baseNotEmpty := r.baseIterator.First()
baseEmpty := !baseNotEmpty
err := r.toAddIterator.Close()
if err != nil {
r.currentErr = err
return true
}
r.toAddIterator = r.diff.ToAdd().Iterator()
toAddEmpty := r.diff.ToAdd().Len() == 0
@@ -70,9 +61,6 @@ func (r *readOnlyUTXOIteratorWithDiff) First() bool {
}
func (r *readOnlyUTXOIteratorWithDiff) Next() bool {
if r.isClosed {
panic("Tried using a closed readOnlyUTXOIteratorWithDiff")
}
for r.baseIterator.Next() { // keep looping until we reach an outpoint/entry pair that is not in r.diff.toRemove
r.currentOutpoint, r.currentUTXOEntry, r.currentErr = r.baseIterator.Get()
if !r.diff.mutableUTXODiff.toRemove.containsWithBlueScore(r.currentOutpoint, r.currentUTXOEntry.BlockBlueScore()) {
@@ -89,30 +77,5 @@ func (r *readOnlyUTXOIteratorWithDiff) Next() bool {
}
func (r *readOnlyUTXOIteratorWithDiff) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
if r.isClosed {
return nil, nil, errors.New("Tried using a closed readOnlyUTXOIteratorWithDiff")
}
return r.currentOutpoint, r.currentUTXOEntry, r.currentErr
}
func (r *readOnlyUTXOIteratorWithDiff) Close() error {
if r.isClosed {
return errors.New("Tried using a closed readOnlyUTXOIteratorWithDiff")
}
r.isClosed = true
err := r.baseIterator.Close()
if err != nil {
return err
}
err = r.toAddIterator.Close()
if err != nil {
return err
}
r.baseIterator = nil
r.diff = nil
r.currentOutpoint = nil
r.currentUTXOEntry = nil
r.currentErr = nil
r.toAddIterator = nil
return nil
}

View File

@@ -260,7 +260,6 @@ var TestnetParams = Params{
Net: appmessage.Testnet,
RPCPort: "16210",
DefaultPort: "16211",
DNSSeeds: []string{"testnet-2-dnsseed.daglabs-dev.com"},
// DAG parameters
GenesisBlock: &testnetGenesisBlock,

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("BLTB")
var log, _ = logger.Get(logger.SubsystemTags.MINR)

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("TXMP")
var log, _ = logger.Get(logger.SubsystemTags.TXMP)

View File

@@ -11,8 +11,6 @@ import (
"sync"
"time"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionhelper"
@@ -22,6 +20,7 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/estimatedsize"
miningmanagermodel "github.com/kaspanet/kaspad/domain/miningmanager/model"
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
@@ -230,7 +229,9 @@ func (mp *mempool) limitNumOrphans() error {
numOrphans := len(mp.orphans)
if numExpired := origNumOrphans - numOrphans; numExpired > 0 {
log.Debugf("Expired %d orphans (remaining: %d)", numExpired, numOrphans)
log.Debugf("Expired %d %s (remaining: %d)", numExpired,
logger.PickNoun(uint64(numExpired), "orphan", "orphans"),
numOrphans)
}
}
@@ -939,14 +940,10 @@ func (mp *mempool) ChainedCount() int {
func (mp *mempool) BlockCandidateTransactions() []*consensusexternalapi.DomainTransaction {
mp.mtx.RLock()
defer mp.mtx.RUnlock()
onEnd := logger.LogAndMeasureExecutionTime(log, "BlockCandidateTransactions")
defer onEnd()
descs := make([]*consensusexternalapi.DomainTransaction, len(mp.pool))
i := 0
for _, desc := range mp.pool {
descs[i] = desc.DomainTransaction.Clone() // Clone the transaction to prevent data races. A shallow-copy might do as well
descs[i] = desc.DomainTransaction
i++
}

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("UTIN")
var log, _ = logger.Get(logger.SubsystemTags.INDX)

View File

@@ -45,7 +45,8 @@ func serializeHashes(hashes []*externalapi.DomainHash) []byte {
for i, hash := range hashes {
start := hashesLengthSize + externalapi.DomainHashSize*i
end := start + externalapi.DomainHashSize
copy(serializedHashes[start:end], hash.ByteSlice())
copy(serializedHashes[start:end],
hash.ByteSlice())
}
return serializedHashes
}
@@ -62,11 +63,12 @@ func deserializeHashes(serializedHashes []byte) ([]*externalapi.DomainHash, erro
}
var err error
hashes[i], err = externalapi.NewDomainHashFromByteSlice(serializedHashes[start:end])
hashes[i], err = externalapi.
NewDomainHashFromByteSlice(serializedHashes[start:end])
if err != nil {
return nil, err
}
}
return hashes, nil
}

View File

@@ -1,44 +0,0 @@
package utxoindex
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
"io"
"math/rand"
"testing"
)
func Test_serializeHashes(t *testing.T) {
r := rand.New(rand.NewSource(0))
for length := 0; length < 32; length++ {
hashes := make([]*externalapi.DomainHash, length)
for i := range hashes {
var hashBytes [32]byte
r.Read(hashBytes[:])
hashes[i] = externalapi.NewDomainHashFromByteArray(&hashBytes)
}
result, err := deserializeHashes(serializeHashes(hashes))
if err != nil {
t.Fatalf("Failed deserializing hashes: %v", err)
}
if !externalapi.HashesEqual(hashes, result) {
t.Fatalf("Expected \n %s \n==\n %s\n", hashes, result)
}
}
}
func Test_deserializeHashesFailure(t *testing.T) {
hashes := []*externalapi.DomainHash{
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}),
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}),
}
serialized := serializeHashes(hashes)
binary.LittleEndian.PutUint64(serialized[:8], uint64(len(hashes)+1))
_, err := deserializeHashes(serialized)
if !errors.Is(err, io.ErrUnexpectedEOF) {
t.Fatalf("Expected error to be EOF, instead got: %v", err)
}
}

View File

@@ -249,7 +249,6 @@ func (uis *utxoIndexStore) getUTXOOutpointEntryPairs(scriptPublicKey *externalap
if err != nil {
return nil, err
}
defer cursor.Close()
utxoOutpointEntryPairs := make(UTXOOutpointEntryPairs)
for cursor.Next() {
key, err := cursor.Key()
@@ -298,7 +297,6 @@ func (uis *utxoIndexStore) deleteAll() error {
if err != nil {
return err
}
defer cursor.Close()
for cursor.Next() {
key, err := cursor.Key()
if err != nil {

View File

@@ -131,7 +131,6 @@ func (ui *UTXOIndex) Update(blockInsertionResult *externalapi.BlockInsertionResu
func (ui *UTXOIndex) addUTXOs(toAdd externalapi.UTXOCollection) error {
iterator := toAdd.Iterator()
defer iterator.Close()
for ok := iterator.First(); ok; ok = iterator.Next() {
outpoint, entry, err := iterator.Get()
if err != nil {
@@ -149,7 +148,6 @@ func (ui *UTXOIndex) addUTXOs(toAdd externalapi.UTXOCollection) error {
func (ui *UTXOIndex) removeUTXOs(toRemove externalapi.UTXOCollection) error {
iterator := toRemove.Iterator()
defer iterator.Close()
for ok := iterator.First(); ok; ok = iterator.Next() {
outpoint, entry, err := iterator.Get()
if err != nil {

View File

@@ -8,4 +8,4 @@ import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
var log = logger.RegisterSubSystem("CNFG")
var log, _ = logger.Get(logger.SubsystemTags.CNFG)

View File

@@ -106,8 +106,7 @@ func (c *LevelDBCursor) Close() error {
return errors.New("cannot close an already closed cursor")
}
c.isClosed = true
c.ldbIterator.Release()
c.ldbIterator = nil
c.bucket = nil
return nil
}

View File

@@ -1,7 +1,5 @@
package ldb
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
)
import "github.com/kaspanet/kaspad/infrastructure/logger"
var log = logger.RegisterSubSystem("KSDB")
var log, _ = logger.Get(logger.SubsystemTags.KSDB)

View File

@@ -1,191 +0,0 @@
package logger
import (
"fmt"
"github.com/jrick/logrotate/rotator"
"github.com/pkg/errors"
"io"
"os"
"path/filepath"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
)
const normalLogSize = 512
// defaultFlags specifies changes to the default logger behavior. It is set
// during package init and configured using the LOGFLAGS environment variable.
// New logger backends can override these default flags using NewBackendWithFlags.
// We use this instead of an `init()` function because package-level variables are
// initialized before init functions run, and this variable is used inside other
// variable initializations, so it must be ready before them
var defaultFlags = getDefaultFlags()
// Flags to modify Backend's behavior.
const (
// LogFlagLongFile modifies the logger output to include full path and line number
// of the logging callsite, e.g. /a/b/c/main.go:123.
LogFlagLongFile uint32 = 1 << iota
// LogFlagShortFile modifies the logger output to include filename and line number
// of the logging callsite, e.g. main.go:123. Takes precedence over LogFlagLongFile.
LogFlagShortFile
)
// Read logger flags from the LOGFLAGS environment variable. Multiple flags can
// be set at once, separated by commas.
func getDefaultFlags() (flags uint32) {
for _, f := range strings.Split(os.Getenv("LOGFLAGS"), ",") {
switch f {
case "longfile":
flags |= LogFlagLongFile
case "shortfile":
flags |= LogFlagShortFile
}
}
return
}
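
For illustration, a self-contained sketch of how the LOGFLAGS environment variable maps onto these flag bits; the constants and parsing loop are duplicated from the snippet above so it compiles on its own:

package main

import (
	"fmt"
	"os"
	"strings"
)

const (
	LogFlagLongFile uint32 = 1 << iota
	LogFlagShortFile
)

func getDefaultFlags() (flags uint32) {
	for _, f := range strings.Split(os.Getenv("LOGFLAGS"), ",") {
		switch f {
		case "longfile":
			flags |= LogFlagLongFile
		case "shortfile":
			flags |= LogFlagShortFile
		}
	}
	return
}

func main() {
	// Normally set in the process environment, e.g. LOGFLAGS=shortfile ./kaspad;
	// set here only so the example is self-contained.
	os.Setenv("LOGFLAGS", "shortfile")
	fmt.Println(getDefaultFlags()&LogFlagShortFile != 0) // true
}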
const logsBuffer = 0
// Backend is a logging backend. Subsystems created from the backend write to
// the backend's Writer. Backend provides atomic writes to the Writer from all
// subsystems.
type Backend struct {
flag uint32
isRunning uint32
writers []logWriter
writeChan chan logEntry
syncClose sync.Mutex // used to sync that the logger finished writing everything
}
// NewBackendWithFlags configures a Backend to use the specified flags rather than using
// the package's defaults as determined through the LOGFLAGS environment
// variable.
func NewBackendWithFlags(flags uint32) *Backend {
return &Backend{flag: flags, writeChan: make(chan logEntry, logsBuffer)}
}
// NewBackend creates a new logger backend.
func NewBackend() *Backend {
return NewBackendWithFlags(defaultFlags)
}
const (
defaultThresholdKB = 100 * 1000 // 100 MB logs by default.
defaultMaxRolls = 8 // keep 8 last logs by default.
)
type logWriter interface {
io.WriteCloser
LogLevel() Level
}
type logWriterWrap struct {
io.WriteCloser
logLevel Level
}
func (lw logWriterWrap) LogLevel() Level {
return lw.logLevel
}
// AddLogFile adds a file which the log will write into on a certain
// log level with the default log rotation settings. It'll create the file if it doesn't exist.
func (b *Backend) AddLogFile(logFile string, logLevel Level) error {
return b.AddLogFileWithCustomRotator(logFile, logLevel, defaultThresholdKB, defaultMaxRolls)
}
// AddLogWriter adds a writer implementing io.WriteCloser which the log will
// write into on a certain log level.
func (b *Backend) AddLogWriter(logWriter io.WriteCloser, logLevel Level) error {
if b.IsRunning() {
return errors.New("The logger is already running")
}
b.writers = append(b.writers, logWriterWrap{
WriteCloser: logWriter,
logLevel: logLevel,
})
return nil
}
// AddLogFileWithCustomRotator adds a file which the log will write into on a certain
// log level, with the specified log rotation settings.
// It'll create the file if it doesn't exist.
func (b *Backend) AddLogFileWithCustomRotator(logFile string, logLevel Level, thresholdKB int64, maxRolls int) error {
if b.IsRunning() {
return errors.New("The logger is already running")
}
logDir, _ := filepath.Split(logFile)
// if the logDir is empty then `logFile` is in the cwd and there's no need to create any directory.
if logDir != "" {
err := os.MkdirAll(logDir, 0700)
if err != nil {
return errors.Errorf("failed to create log directory: %+v", err)
}
}
r, err := rotator.New(logFile, thresholdKB, false, maxRolls)
if err != nil {
return errors.Errorf("failed to create file rotator: %s", err)
}
b.writers = append(b.writers, logWriterWrap{
WriteCloser: r,
logLevel: logLevel,
})
return nil
}
// Run launches the logger backend in a separate goroutine. Should only be called once.
func (b *Backend) Run() error {
if !atomic.CompareAndSwapUint32(&b.isRunning, 0, 1) {
return errors.New("The logger is already running")
}
go func() {
defer func() {
if err := recover(); err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Fatal error in logger.Backend goroutine: %+v\n", err)
_, _ = fmt.Fprintf(os.Stderr, "Goroutine stacktrace: %s\n", debug.Stack())
}
}()
b.runBlocking()
}()
return nil
}
func (b *Backend) runBlocking() {
defer atomic.StoreUint32(&b.isRunning, 0)
b.syncClose.Lock()
defer b.syncClose.Unlock()
for log := range b.writeChan {
for _, writer := range b.writers {
if log.level >= writer.LogLevel() {
_, _ = writer.Write(log.log)
}
}
}
}
// IsRunning returns true if backend.Run() has been called and false if it hasn't.
func (b *Backend) IsRunning() bool {
return atomic.LoadUint32(&b.isRunning) != 0
}
// Close finalizes all log rotators for this backend
func (b *Backend) Close() {
close(b.writeChan)
// Wait for it to finish writing using the syncClose mutex.
b.syncClose.Lock()
defer b.syncClose.Unlock()
for _, writer := range b.writers {
_ = writer.Close()
}
}
// Logger returns a new logger for a particular subsystem that writes to the
// Backend b. A tag describes the subsystem and is included in all log
// messages. The logger uses the info verbosity level by default.
func (b *Backend) Logger(subsystemTag string) *Logger {
return &Logger{LevelOff, subsystemTag, b, b.writeChan}
}
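
A minimal usage sketch for the channel-based Backend shown in this hunk, assuming Logger exposes SetLevel and Infof like the rest of the package: writers must be added before Run, logging before Run panics, and Close blocks until the writer goroutine drains.

package main

import (
	"os"

	"github.com/kaspanet/kaspad/infrastructure/logger"
)

func main() {
	backend := logger.NewBackend()

	// Writers can only be added while the backend is not yet running.
	if err := backend.AddLogWriter(os.Stdout, logger.LevelInfo); err != nil {
		panic(err)
	}

	// Run launches the single writer goroutine that serializes all log writes.
	if err := backend.Run(); err != nil {
		panic(err)
	}
	defer backend.Close() // closes writeChan and waits for it to drain

	log := backend.Logger("TEST")
	log.SetLevel(logger.LevelInfo) // loggers start at LevelOff in this version
	log.Infof("backend running: %t", backend.IsRunning())
}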

View File

@@ -1,54 +0,0 @@
package logger
import "strings"
// Level is the level at which a logger is configured. All messages sent
// to a level which is below the current level are filtered.
type Level uint32
// Level constants.
const (
LevelTrace Level = iota
LevelDebug
LevelInfo
LevelWarn
LevelError
LevelCritical
LevelOff
)
// levelStrs defines the human-readable names for each logging level.
var levelStrs = [...]string{"TRC", "DBG", "INF", "WRN", "ERR", "CRT", "OFF"}
// LevelFromString returns a level based on the input string s. If the input
// can't be interpreted as a valid log level, the info level and false is
// returned.
func LevelFromString(s string) (l Level, ok bool) {
switch strings.ToLower(s) {
case "trace", "trc":
return LevelTrace, true
case "debug", "dbg":
return LevelDebug, true
case "info", "inf":
return LevelInfo, true
case "warn", "wrn":
return LevelWarn, true
case "error", "err":
return LevelError, true
case "critical", "crt":
return LevelCritical, true
case "off":
return LevelOff, true
default:
return LevelInfo, false
}
}
// String returns the tag of the logger used in log messages, or "OFF" if
// the level will not produce any log output.
func (l Level) String() string {
if l >= LevelOff {
return "OFF"
}
return levelStrs[l]
}

View File

@@ -10,7 +10,6 @@ import (
"os"
"sort"
"strings"
"sync"
"github.com/pkg/errors"
)
@@ -27,37 +26,135 @@ var (
// BackendLog is the logging backend used to create all subsystem loggers.
BackendLog = NewBackend()
// subsystemLoggers maps each subsystem identifier to its associated logger.
subsystemLoggers = make(map[string]*Logger)
subsystemLoggersMutex sync.Mutex
adxrLog = BackendLog.Logger("ADXR")
amgrLog = BackendLog.Logger("AMGR")
cmgrLog = BackendLog.Logger("CMGR")
ksdbLog = BackendLog.Logger("KSDB")
kasdLog = BackendLog.Logger("KASD")
bdagLog = BackendLog.Logger("BDAG")
cnfgLog = BackendLog.Logger("CNFG")
discLog = BackendLog.Logger("DISC")
indxLog = BackendLog.Logger("INDX")
minrLog = BackendLog.Logger("MINR")
peerLog = BackendLog.Logger("PEER")
rpcsLog = BackendLog.Logger("RPCS")
rpccLog = BackendLog.Logger("RPCC")
scrpLog = BackendLog.Logger("SCRP")
srvrLog = BackendLog.Logger("SRVR")
syncLog = BackendLog.Logger("SYNC")
txmpLog = BackendLog.Logger("TXMP")
utilLog = BackendLog.Logger("UTIL")
profLog = BackendLog.Logger("PROF")
protLog = BackendLog.Logger("PROT")
muxxLog = BackendLog.Logger("MUXX")
grpcLog = BackendLog.Logger("GRPC")
p2psLog = BackendLog.Logger("P2PS")
ntarLog = BackendLog.Logger("NTAR")
dnssLog = BackendLog.Logger("DNSS")
snvrLog = BackendLog.Logger("SNVR")
wsvcLog = BackendLog.Logger("WSVC")
reacLog = BackendLog.Logger("REAC")
prnmLog = BackendLog.Logger("PRNM")
blvlLog = BackendLog.Logger("BLVL")
)
// RegisterSubSystem registers a new subsystem logger. It should be called from a
// global variable initialization, and returns the existing logger if the
// subsystem is already registered.
func RegisterSubSystem(subsystem string) *Logger {
subsystemLoggersMutex.Lock()
defer subsystemLoggersMutex.Unlock()
logger, exists := subsystemLoggers[subsystem]
if !exists {
logger = BackendLog.Logger(subsystem)
subsystemLoggers[subsystem] = logger
}
return logger
// SubsystemTags is an enum of all subsystem tags
var SubsystemTags = struct {
ADXR,
AMGR,
CMGR,
KSDB,
KASD,
BDAG,
CNFG,
DISC,
INDX,
MINR,
PEER,
RPCS,
RPCC,
SCRP,
SRVR,
SYNC,
TXMP,
UTIL,
PROF,
PROT,
MUXX,
GRPC,
P2PS,
NTAR,
DNSS,
SNVR,
WSVC,
REAC,
PRNM,
BLVL string
}{
ADXR: "ADXR",
AMGR: "AMGR",
CMGR: "CMGR",
KSDB: "KSDB",
KASD: "KASD",
BDAG: "BDAG",
CNFG: "CNFG",
DISC: "DISC",
INDX: "INDX",
MINR: "MINR",
PEER: "PEER",
RPCS: "RPCS",
RPCC: "RPCC",
SCRP: "SCRP",
SRVR: "SRVR",
SYNC: "SYNC",
TXMP: "TXMP",
UTIL: "UTIL",
PROF: "PROF",
PROT: "PROT",
MUXX: "MUXX",
GRPC: "GRPC",
P2PS: "P2PS",
NTAR: "NTAR",
DNSS: "DNSS",
SNVR: "SNVR",
WSVC: "WSVC",
REAC: "REAC",
PRNM: "PRNM",
BLVL: "BLVL",
}
// InitLogStdout attaches stdout to the backend log and starts the logger.
func InitLogStdout(logLevel Level) {
err := BackendLog.AddLogWriter(os.Stdout, logLevel)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", LevelWarn, err)
os.Exit(1)
}
err = BackendLog.Run()
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err)
os.Exit(1)
}
// subsystemLoggers maps each subsystem identifier to its associated logger.
var subsystemLoggers = map[string]*Logger{
SubsystemTags.ADXR: adxrLog,
SubsystemTags.AMGR: amgrLog,
SubsystemTags.CMGR: cmgrLog,
SubsystemTags.KSDB: ksdbLog,
SubsystemTags.KASD: kasdLog,
SubsystemTags.BDAG: bdagLog,
SubsystemTags.CNFG: cnfgLog,
SubsystemTags.DISC: discLog,
SubsystemTags.INDX: indxLog,
SubsystemTags.MINR: minrLog,
SubsystemTags.PEER: peerLog,
SubsystemTags.RPCS: rpcsLog,
SubsystemTags.RPCC: rpccLog,
SubsystemTags.SCRP: scrpLog,
SubsystemTags.SRVR: srvrLog,
SubsystemTags.SYNC: syncLog,
SubsystemTags.TXMP: txmpLog,
SubsystemTags.UTIL: utilLog,
SubsystemTags.PROF: profLog,
SubsystemTags.PROT: protLog,
SubsystemTags.MUXX: muxxLog,
SubsystemTags.GRPC: grpcLog,
SubsystemTags.P2PS: p2psLog,
SubsystemTags.NTAR: ntarLog,
SubsystemTags.DNSS: dnssLog,
SubsystemTags.SNVR: snvrLog,
SubsystemTags.WSVC: wsvcLog,
SubsystemTags.REAC: reacLog,
SubsystemTags.PRNM: prnmLog,
SubsystemTags.BLVL: blvlLog,
}
// InitLog attaches log file and error log file to the backend log.
@@ -65,65 +162,63 @@ func InitLog(logFile, errLogFile string) {
// 280 MB (MB=1000^2 bytes)
err := BackendLog.AddLogFileWithCustomRotator(logFile, LevelTrace, 1000*280, 64)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, LevelTrace, err)
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, LevelTrace, err)
os.Exit(1)
}
err = BackendLog.AddLogFile(errLogFile, LevelWarn)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, LevelWarn, err)
fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, LevelWarn, err)
os.Exit(1)
}
InitLogStdout(LevelInfo)
}
// SetLogLevel sets the logging level for provided subsystem. Invalid
// subsystems are ignored. Uninitialized subsystems are dynamically created as
// needed.
func SetLogLevel(subsystemID string, logLevel string) error {
subsystemLoggersMutex.Lock()
defer subsystemLoggersMutex.Unlock()
func SetLogLevel(subsystemID string, logLevel string) {
// Ignore invalid subsystems.
logger, ok := subsystemLoggers[subsystemID]
if !ok {
return errors.Errorf("'%s' Isn't a valid subsystem", subsystemID)
}
level, ok := LevelFromString(logLevel)
if !ok {
return errors.Errorf("'%s' Isn't a valid log level", logLevel)
return
}
// Defaults to info if the log level is invalid.
level, _ := LevelFromString(logLevel)
logger.SetLevel(level)
return nil
}
// SetLogLevelsString is the same as SetLogLevels, but parses the level from a string
func SetLogLevelsString(logLevel string) error {
level, ok := LevelFromString(logLevel)
if !ok {
return errors.Errorf("'%s' Isn't a valid log level", logLevel)
}
SetLogLevels(level)
return nil
}
// SetLogLevels sets the log level for all subsystem loggers to the passed
// level. It also dynamically creates the subsystem loggers as needed, so it
// can be used to initialize the logging system.
func SetLogLevels(logLevel Level) {
subsystemLoggersMutex.Lock()
defer subsystemLoggersMutex.Unlock()
func SetLogLevels(logLevel string) {
// Configure all sub-systems with the new logging level. Dynamically
// create loggers as needed.
for _, logger := range subsystemLoggers {
logger.SetLevel(logLevel)
for subsystemID := range subsystemLoggers {
SetLogLevel(subsystemID, logLevel)
}
}
// DirectionString is a helper function that returns a string that represents
// the direction of a connection (inbound or outbound).
func DirectionString(inbound bool) string {
if inbound {
return "inbound"
}
return "outbound"
}
// PickNoun returns the singular or plural form of a noun depending
// on the count n.
func PickNoun(n uint64, singular, plural string) string {
if n == 1 {
return singular
}
return plural
}
// SupportedSubsystems returns a sorted slice of the supported subsystems for
// logging purposes.
func SupportedSubsystems() []string {
subsystemLoggersMutex.Lock()
defer subsystemLoggersMutex.Unlock()
// Convert the subsystemLoggers map keys to a slice.
subsystems := make([]string, 0, len(subsystemLoggers))
for subsysID := range subsystemLoggers {
@@ -135,9 +230,8 @@ func SupportedSubsystems() []string {
return subsystems
}
func getSubsystem(tag string) (logger *Logger, ok bool) {
subsystemLoggersMutex.Lock()
defer subsystemLoggersMutex.Unlock()
// Get returns a logger of a specific sub system
func Get(tag string) (logger *Logger, ok bool) {
logger, ok = subsystemLoggers[tag]
return
}
@@ -149,8 +243,16 @@ func ParseAndSetLogLevels(logLevel string) error {
// When the specified string doesn't have any delimiters, treat it as
// the log level for all subsystems.
if !strings.Contains(logLevel, ",") && !strings.Contains(logLevel, "=") {
// Validate and change the logging level for all subsystems.
return SetLogLevelsString(logLevel)
// Validate debug log level.
if !validLogLevel(logLevel) {
str := "The specified debug level [%s] is invalid"
return errors.Errorf(str, logLevel)
}
// Change the logging level for all subsystems.
SetLogLevels(logLevel)
return nil
}
// Split the specified string into subsystem/level pairs while detecting
@@ -167,16 +269,54 @@ func ParseAndSetLogLevels(logLevel string) error {
subsysID, logLevel := fields[0], fields[1]
// Validate subsystem.
if _, exists := getSubsystem(subsysID); !exists {
if _, exists := Get(subsysID); !exists {
str := "The specified subsystem [%s] is invalid -- " +
"supported subsytems %s"
return errors.Errorf(str, subsysID, strings.Join(SupportedSubsystems(), ", "))
}
err := SetLogLevel(subsysID, logLevel)
if err != nil {
return err
// Validate log level.
if !validLogLevel(logLevel) {
str := "The specified debug level [%s] is invalid"
return errors.Errorf(str, logLevel)
}
SetLogLevel(subsysID, logLevel)
}
return nil
}
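
A short usage sketch for ParseAndSetLogLevels, covering the two accepted forms described above (a bare level, or comma-separated subsystem=level pairs); the subsystem tags are the ones registered in this package:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/infrastructure/logger"
)

func main() {
	// A bare level applies to every subsystem.
	if err := logger.ParseAndSetLogLevels("debug"); err != nil {
		fmt.Println(err)
	}

	// subsystem=level pairs, comma separated, override individual subsystems.
	if err := logger.ParseAndSetLogLevels("CNFG=trace,PEER=warn"); err != nil {
		fmt.Println(err)
	}
}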
// validLogLevel returns whether or not logLevel is a valid debug log level.
func validLogLevel(logLevel string) bool {
switch logLevel {
case "trace":
fallthrough
case "debug":
fallthrough
case "info":
fallthrough
case "warn":
fallthrough
case "error":
fallthrough
case "critical":
return true
}
return false
}
// LogClosure is a closure that can be printed with %s. It is used to generate
// expensive-to-create data for a detailed log level while avoiding the work if
// the data isn't printed.
type LogClosure func() string
func (c LogClosure) String() string {
return c()
}
// NewLogClosure casts a function to a LogClosure.
// See LogClosure for details.
func NewLogClosure(c func() string) LogClosure {
return c
}

View File

@@ -36,22 +36,362 @@ import (
"bytes"
"fmt"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/pkg/errors"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"github.com/jrick/logrotate/rotator"
)
// defaultFlags specifies changes to the default logger behavior. It is set
// during package init and configured using the LOGFLAGS environment variable.
// New logger backends can override these default flags using WithFlags.
var defaultFlags uint32
// Flags to modify Backend's behavior.
const (
// Llongfile modifies the logger output to include full path and line number
// of the logging callsite, e.g. /a/b/c/main.go:123.
Llongfile uint32 = 1 << iota
// Lshortfile modifies the logger output to include filename and line number
// of the logging callsite, e.g. main.go:123. Overrides Llongfile.
Lshortfile
)
// Read logger flags from the LOGFLAGS environment variable. Multiple flags can
// be set at once, separated by commas.
func init() {
for _, f := range strings.Split(os.Getenv("LOGFLAGS"), ",") {
switch f {
case "longfile":
defaultFlags |= Llongfile
case "shortfile":
defaultFlags |= Lshortfile
}
}
}
// Level is the level at which a logger is configured. All messages sent
// to a level which is below the current level are filtered.
type Level uint32
// Level constants.
const (
LevelTrace Level = iota
LevelDebug
LevelInfo
LevelWarn
LevelError
LevelCritical
LevelOff
)
// levelStrs defines the human-readable names for each logging level.
var levelStrs = [...]string{"TRC", "DBG", "INF", "WRN", "ERR", "CRT", "OFF"}
// LevelFromString returns a level based on the input string s. If the input
// can't be interpreted as a valid log level, the info level and false is
// returned.
func LevelFromString(s string) (l Level, ok bool) {
switch strings.ToLower(s) {
case "trace", "trc":
return LevelTrace, true
case "debug", "dbg":
return LevelDebug, true
case "info", "inf":
return LevelInfo, true
case "warn", "wrn":
return LevelWarn, true
case "error", "err":
return LevelError, true
case "critical", "crt":
return LevelCritical, true
case "off":
return LevelOff, true
default:
return LevelInfo, false
}
}
// String returns the tag of the logger used in log messages, or "OFF" if
// the level will not produce any log output.
func (l Level) String() string {
if l >= LevelOff {
return "OFF"
}
return levelStrs[l]
}
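
A tiny usage sketch for LevelFromString, which accepts both the long and the three-letter spellings and falls back to LevelInfo with ok=false on unknown input:

package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/infrastructure/logger"
)

func main() {
	level, ok := logger.LevelFromString("wrn")
	fmt.Println(level, ok) // WRN true

	level, ok = logger.LevelFromString("nonsense")
	fmt.Println(level, ok) // INF false
}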
// NewBackend creates a new logger backend.
func NewBackend(opts ...BackendOption) *Backend {
b := &Backend{flag: defaultFlags, stdoutLevel: LevelInfo}
for _, o := range opts {
o(b)
}
return b
}
type backendLogRotator struct {
*rotator.Rotator
logLevel Level
}
// Backend is a logging backend. Subsystems created from the backend write to
// the backend's Writer. Backend provides atomic writes to the Writer from all
// subsystems.
type Backend struct {
rotators []*backendLogRotator
mu sync.Mutex // ensures atomic writes
flag uint32
stdoutLevel Level
}
// BackendOption is a function used to modify the behavior of a Backend.
type BackendOption func(b *Backend)
// WithFlags configures a Backend to use the specified flags rather than using
// the package's defaults as determined through the LOGFLAGS environment
// variable.
func WithFlags(flags uint32) BackendOption {
return func(b *Backend) {
b.flag = flags
}
}
// bufferPool defines a concurrent safe free list of byte slices used to provide
// temporary buffers for formatting log messages prior to outputting them.
var bufferPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 0, 120)
return &b // pointer to slice to avoid boxing alloc
},
}
// buffer returns a byte slice from the free list. A new buffer is allocated if
// there are not any available on the free list. The returned byte slice should
// be returned to the free list by using the recycleBuffer function when the
// caller is done with it.
func buffer() *[]byte {
return bufferPool.Get().(*[]byte)
}
// recycleBuffer puts the provided byte slice, which should have been obtained via
// the buffer function, back on the free list.
func recycleBuffer(b *[]byte) {
*b = (*b)[:0]
bufferPool.Put(b)
}
// From stdlib log package.
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid
// zero-padding.
func itoa(buf *[]byte, i int, wid int) {
// Assemble decimal in reverse order.
var b [20]byte
bp := len(b) - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
b[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
b[bp] = byte('0' + i)
*buf = append(*buf, b[bp:]...)
}
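
Two worked examples of itoa's behavior, with the function duplicated so the snippet compiles on its own; a positive width zero-pads, a negative width disables padding:

package main

import "fmt"

func itoa(buf *[]byte, i int, wid int) {
	// Assemble decimal in reverse order.
	var b [20]byte
	bp := len(b) - 1
	for i >= 10 || wid > 1 {
		wid--
		q := i / 10
		b[bp] = byte('0' + i - q*10)
		bp--
		i = q
	}
	// i < 10
	b[bp] = byte('0' + i)
	*buf = append(*buf, b[bp:]...)
}

func main() {
	var buf []byte
	itoa(&buf, 7, 2) // zero-padded to width 2
	buf = append(buf, ' ')
	itoa(&buf, 123, -1)      // negative width: no padding
	fmt.Println(string(buf)) // "07 123"
}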
// Appends a header in the default format 'YYYY-MM-DD hh:mm:ss.sss [LVL] TAG: '.
// If either of the Lshortfile or Llongfile flags is specified, the file name
// and line number are included after the tag and before the final colon.
func formatHeader(buf *[]byte, t mstime.Time, lvl, tag string, file string, line int) {
year, month, day := t.Date()
hour, min, sec := t.Clock()
ms := t.Millisecond()
itoa(buf, year, 4)
*buf = append(*buf, '-')
itoa(buf, int(month), 2)
*buf = append(*buf, '-')
itoa(buf, day, 2)
*buf = append(*buf, ' ')
itoa(buf, hour, 2)
*buf = append(*buf, ':')
itoa(buf, min, 2)
*buf = append(*buf, ':')
itoa(buf, sec, 2)
*buf = append(*buf, '.')
itoa(buf, ms, 3)
*buf = append(*buf, " ["...)
*buf = append(*buf, lvl...)
*buf = append(*buf, "] "...)
*buf = append(*buf, tag...)
if file != "" {
*buf = append(*buf, ' ')
*buf = append(*buf, file...)
*buf = append(*buf, ':')
itoa(buf, line, -1)
}
*buf = append(*buf, ": "...)
}
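
The resulting header shape is 'YYYY-MM-DD hh:mm:ss.sss [LVL] TAG file:line: '. A sketch reproducing it with fmt instead of the allocation-free itoa path, just to make the layout concrete (values are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2021, 2, 24, 13, 24, 22, 123e6, time.UTC)
	header := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%03d [%s] %s %s:%d: ",
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(),
		t.Nanosecond()/1e6, "INF", "KASD", "main.go", 123)
	fmt.Println(header + "message")
	// Output: 2021-02-24 13:24:22.123 [INF] KASD main.go:123: message
}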
// calldepth is the call depth of the callsite function relative to the
// caller of the subsystem logger. It is used to recover the filename and line
// number of the logging call if either the short or long file flags are
// specified.
const calldepth = 3
// callsite returns the file name and line number of the callsite to the
// subsystem logger.
func callsite(flag uint32) (string, int) {
_, file, line, ok := runtime.Caller(calldepth)
if !ok {
return "???", 0
}
if flag&Lshortfile != 0 {
short := file
for i := len(file) - 1; i > 0; i-- {
if os.IsPathSeparator(file[i]) {
short = file[i+1:]
break
}
}
file = short
}
return file, line
}
const (
defaultThresholdKB = 100 * 1000 // 100 MB logs by default.
defaultMaxRolls = 8 // keep 8 last logs by default.
)
// AddLogFile adds a file which the log will write into on a certain
// log level with the default log rotation settings. It'll create the file if it doesn't exist.
func (b *Backend) AddLogFile(logFile string, logLevel Level) error {
return b.AddLogFileWithCustomRotator(logFile, logLevel, defaultThresholdKB, defaultMaxRolls)
}
// AddLogFileWithCustomRotator adds a file which the log will write into on a certain
// log level, with the specified log rotation settings.
// It'll create the file if it doesn't exist.
func (b *Backend) AddLogFileWithCustomRotator(logFile string, logLevel Level, thresholdKB int64, maxRolls int) error {
logDir, _ := filepath.Split(logFile)
// if the logDir is empty then `logFile` is in the cwd and there's no need to create any directory.
if logDir != "" {
err := os.MkdirAll(logDir, 0700)
if err != nil {
return errors.Errorf("failed to create log directory: %+v", err)
}
}
r, err := rotator.New(logFile, thresholdKB, false, maxRolls)
if err != nil {
return errors.Errorf("failed to create file rotator: %s", err)
}
b.rotators = append(b.rotators, &backendLogRotator{
Rotator: r,
logLevel: logLevel,
})
return nil
}
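
A hedged usage sketch for the rotating file sinks on this Backend: AddLogFile uses the 100 MB / 8-roll defaults above, while AddLogFileWithCustomRotator takes an explicit threshold (in KB) and roll count. The file paths are made up for the example.

package main

import "github.com/kaspanet/kaspad/infrastructure/logger"

func main() {
	backend := logger.NewBackend()

	// Default rotation: defaultThresholdKB (100 MB) and defaultMaxRolls (8).
	if err := backend.AddLogFile("logs/kaspad.log", logger.LevelTrace); err != nil {
		panic(err)
	}

	// Custom rotation: roll at 10 MB (threshold is given in KB), keep 4 rolls.
	if err := backend.AddLogFileWithCustomRotator("logs/kaspad_err.log", logger.LevelWarn, 10*1000, 4); err != nil {
		panic(err)
	}

	defer backend.Close() // finalizes all rotators
}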
// print outputs a log message to the writer associated with the backend after
// creating a prefix for the given level and tag according to the formatHeader
// function and formatting the provided arguments using the default formatting
// rules.
func (b *Backend) print(lvl Level, tag string, args ...interface{}) {
t := mstime.Now() // get as early as possible
bytebuf := buffer()
var file string
var line int
if b.flag&(Lshortfile|Llongfile) != 0 {
file, line = callsite(b.flag)
}
formatHeader(bytebuf, t, lvl.String(), tag, file, line)
buf := bytes.NewBuffer(*bytebuf)
fmt.Fprintln(buf, args...)
*bytebuf = buf.Bytes()
b.write(lvl, *bytebuf)
recycleBuffer(bytebuf)
}
// printf outputs a log message to the writer associated with the backend after
// creating a prefix for the given level and tag according to the formatHeader
// function and formatting the provided arguments according to the given format
// specifier.
func (b *Backend) printf(lvl Level, tag string, format string, args ...interface{}) {
t := mstime.Now() // get as early as possible
bytebuf := buffer()
var file string
var line int
if b.flag&(Lshortfile|Llongfile) != 0 {
file, line = callsite(b.flag)
}
formatHeader(bytebuf, t, lvl.String(), tag, file, line)
buf := bytes.NewBuffer(*bytebuf)
fmt.Fprintf(buf, format, args...)
*bytebuf = append(buf.Bytes(), '\n')
b.write(lvl, *bytebuf)
recycleBuffer(bytebuf)
}
func (b *Backend) write(lvl Level, bytesToWrite []byte) {
b.mu.Lock()
defer b.mu.Unlock()
if lvl >= b.StdoutLevel() {
os.Stdout.Write(bytesToWrite)
}
for _, r := range b.rotators {
if lvl >= r.logLevel {
r.Write(bytesToWrite)
}
}
}
// StdoutLevel returns the current stdout logging level
func (b *Backend) StdoutLevel() Level {
return Level(atomic.LoadUint32((*uint32)(&b.stdoutLevel)))
}
// SetStdoutLevel changes the logging level to the passed level.
func (b *Backend) SetStdoutLevel(level Level) {
atomic.StoreUint32((*uint32)(&b.stdoutLevel), uint32(level))
}
// Close finalizes all log rotators for this backend
func (b *Backend) Close() {
for _, r := range b.rotators {
r.Close()
}
}
// Logger returns a new logger for a particular subsystem that writes to the
// Backend b. A tag describes the subsystem and is included in all log
// messages. The logger uses the info verbosity level by default.
func (b *Backend) Logger(subsystemTag string) *Logger {
return &Logger{LevelInfo, subsystemTag, b}
}
// Logger is a subsystem logger for a Backend.
type Logger struct {
lvl Level // atomic
tag string
b *Backend
writeChan chan<- logEntry
}
type logEntry struct {
log []byte
level Level
lvl Level // atomic
tag string
b *Backend
}
// Trace formats message using the default formats for its operands, prepends
@@ -131,7 +471,7 @@ func (l *Logger) Criticalf(format string, args ...interface{}) {
func (l *Logger) Write(logLevel Level, args ...interface{}) {
lvl := l.Level()
if lvl <= logLevel {
l.print(logLevel, l.tag, args...)
l.b.print(logLevel, l.tag, args...)
}
}
@@ -140,7 +480,7 @@ func (l *Logger) Write(logLevel Level, args ...interface{}) {
func (l *Logger) Writef(logLevel Level, format string, args ...interface{}) {
lvl := l.Level()
if lvl <= logLevel {
l.printf(logLevel, l.tag, format, args...)
l.b.printf(logLevel, l.tag, format, args...)
}
}
@@ -158,135 +498,3 @@ func (l *Logger) SetLevel(level Level) {
func (l *Logger) Backend() *Backend {
return l.b
}
// printf outputs a log message to the writer associated with the backend after
// creating a prefix for the given level and tag according to the formatHeader
// function and formatting the provided arguments according to the given format
// specifier.
func (l *Logger) printf(lvl Level, tag string, format string, args ...interface{}) {
t := mstime.Now() // get as early as possible
var file string
var line int
if l.b.flag&(LogFlagShortFile|LogFlagLongFile) != 0 {
file, line = callsite(l.b.flag)
}
buf := make([]byte, 0, normalLogSize)
formatHeader(&buf, t, lvl.String(), tag, file, line)
bytesBuf := bytes.NewBuffer(buf)
_, _ = fmt.Fprintf(bytesBuf, format, args...)
bytesBuf.WriteByte('\n')
if !l.b.IsRunning() {
_, _ = fmt.Fprint(os.Stderr, bytesBuf.String())
panic("Writing to the logger when it's not running")
}
l.writeChan <- logEntry{bytesBuf.Bytes(), lvl}
}
// print outputs a log message to the writer associated with the backend after
// creating a prefix for the given level and tag according to the formatHeader
// function and formatting the provided arguments using the default formatting
// rules.
func (l *Logger) print(lvl Level, tag string, args ...interface{}) {
if atomic.LoadUint32(&l.b.isRunning) == 0 {
panic("printing log without initializing")
}
t := mstime.Now() // get as early as possible
var file string
var line int
if l.b.flag&(LogFlagShortFile|LogFlagLongFile) != 0 {
file, line = callsite(l.b.flag)
}
buf := make([]byte, 0, normalLogSize)
formatHeader(&buf, t, lvl.String(), tag, file, line)
bytesBuf := bytes.NewBuffer(buf)
_, _ = fmt.Fprintln(bytesBuf, args...)
if !l.b.IsRunning() {
panic("Writing to the logger when it's not running")
}
l.writeChan <- logEntry{bytesBuf.Bytes(), lvl}
}
// From stdlib log package.
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
func itoa(buf *[]byte, i int, wid int) {
// Assemble decimal in reverse order.
var b [20]byte
bp := len(b) - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
b[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
b[bp] = byte('0' + i)
*buf = append(*buf, b[bp:]...)
}
// Appends a header in the default format 'YYYY-MM-DD hh:mm:ss.sss [LVL] TAG: '.
// If either of the LogFlagShortFile or LogFlagLongFile flags is specified, the file name
// and line number are included after the tag and before the final colon.
func formatHeader(buf *[]byte, t mstime.Time, lvl, tag string, file string, line int) {
year, month, day := t.Date()
hour, min, sec := t.Clock()
ms := t.Millisecond()
itoa(buf, year, 4)
*buf = append(*buf, '-')
itoa(buf, int(month), 2)
*buf = append(*buf, '-')
itoa(buf, day, 2)
*buf = append(*buf, ' ')
itoa(buf, hour, 2)
*buf = append(*buf, ':')
itoa(buf, min, 2)
*buf = append(*buf, ':')
itoa(buf, sec, 2)
*buf = append(*buf, '.')
itoa(buf, ms, 3)
*buf = append(*buf, " ["...)
*buf = append(*buf, lvl...)
*buf = append(*buf, "] "...)
*buf = append(*buf, tag...)
if file != "" {
*buf = append(*buf, ' ')
*buf = append(*buf, file...)
*buf = append(*buf, ':')
itoa(buf, line, -1)
}
*buf = append(*buf, ": "...)
}
// calldepth is the call depth of the callsite function relative to the
// caller of the subsystem logger. It is used to recover the filename and line
// number of the logging call if either the short or long file flags are
// specified.
const calldepth = 4
// callsite returns the file name and line number of the callsite to the
// subsystem logger.
func callsite(flag uint32) (string, int) {
_, file, line, ok := runtime.Caller(calldepth)
if !ok {
return "???", 0
}
if flag&LogFlagShortFile != 0 {
short := file
for i := len(file) - 1; i > 0; i-- {
if os.IsPathSeparator(file[i]) {
short = file[i+1:]
break
}
}
file = short
}
return file, line
}

View File

@@ -27,18 +27,3 @@ func LogMemoryStats(log *Logger, functionName string) {
stats.Alloc, stats.HeapIdle-stats.HeapReleased+stats.HeapInuse)
}))
}
// LogClosure is a closure that can be printed with %s. It is used to generate
// expensive-to-create data for a detailed log level while avoiding the work if
// the data isn't printed.
type LogClosure func() string
func (c LogClosure) String() string {
return c()
}
// NewLogClosure casts a function to a LogClosure.
// See LogClosure for details.
func NewLogClosure(c func() string) LogClosure {
return c
}

View File

@@ -6,6 +6,8 @@ package addressmanager
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("ADXR")
var log, _ = logger.Get(logger.SubsystemTags.ADXR)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -43,7 +43,6 @@ func (as *addressStore) restoreNotBannedAddresses() error {
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
databaseKey, err := cursor.Key()
if err != nil {
@@ -67,7 +66,6 @@ func (as *addressStore) restoreBannedAddresses() error {
if err != nil {
return err
}
defer cursor.Close()
for ok := cursor.First(); ok; ok = cursor.Next() {
databaseKey, err := cursor.Key()
if err != nil {

View File

@@ -5,5 +5,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("CMGR")
var log, _ = logger.Get(logger.SubsystemTags.CMGR)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -9,5 +9,6 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("CMGR")
var log, _ = logger.Get(logger.SubsystemTags.CMGR)
var spawn = panics.GoroutineWrapperFunc(log)
var spawnAfter = panics.AfterFuncWrapperFunc(log)

View File

@@ -2,6 +2,8 @@ package netadapter
import (
"github.com/kaspanet/kaspad/infrastructure/logger"
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("NTAR")
var log, _ = logger.Get(logger.SubsystemTags.NTAR)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -1,15 +1,12 @@
package grpcserver
import (
"net"
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server/grpcserver/protowire"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"net"
"sync"
"sync/atomic"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/server"
"google.golang.org/grpc"
@@ -65,17 +62,7 @@ func (c *gRPCConnection) Start(router *router.Router) {
spawn("gRPCConnection.Start-connectionLoops", func() {
err := c.connectionLoops()
if err != nil {
status, isStatus := status.FromError(err)
if isStatus {
switch status.Code() {
case codes.Canceled:
log.Debugf("connectionLoop canceled connection for %s: %s", c.address, err)
default:
log.Errorf("status error from connectionLoops for %s: %s", c.address, err)
}
} else {
log.Errorf("unknown error from connectionLoops for %s: %s", c.address, err)
}
log.Errorf("error from connectionLoops for %s: %s", c.address, err)
}
})
}

View File

@@ -9,5 +9,5 @@ import (
"github.com/kaspanet/kaspad/util/panics"
)
var log = logger.RegisterSubSystem("TXMP")
var log, _ = logger.Get(logger.SubsystemTags.TXMP)
var spawn = panics.GoroutineWrapperFunc(log)

View File

@@ -12,12 +12,7 @@ import (
"github.com/pkg/errors"
)
var errorNil = errors.New("a required field is nil")
func (x *Hash) toDomain() (*externalapi.DomainHash, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "Hash is nil")
}
return externalapi.NewDomainHashFromByteSlice(x.Bytes)
}
@@ -48,9 +43,6 @@ func domainHashesToProto(hashes []*externalapi.DomainHash) []*Hash {
}
func (x *TransactionId) toDomain() (*externalapi.DomainTransactionID, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "TransactionId is nil")
}
return transactionid.FromBytes(x.Bytes)
}
@@ -82,7 +74,7 @@ func wireTransactionIDsToProto(ids []*externalapi.DomainTransactionID) []*Transa
func (x *SubnetworkId) toDomain() (*externalapi.DomainSubnetworkID, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "SubnetworkId is nil")
return nil, nil
}
return subnetworks.FromBytes(x.Bytes)
}
@@ -97,9 +89,6 @@ func domainSubnetworkIDToProto(id *externalapi.DomainSubnetworkID) *SubnetworkId
}
func (x *NetAddress) toAppMessage() (*appmessage.NetAddress, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "NetAddress is nil")
}
if x.Port > math.MaxUint16 {
return nil, errors.Errorf("port number is larger than %d", math.MaxUint16)
}
@@ -119,46 +108,3 @@ func appMessageNetAddressToProto(address *appmessage.NetAddress) *NetAddress {
Port: uint32(address.Port),
}
}
func (x *Outpoint) toAppMessage() (*appmessage.Outpoint, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "Outpoint is nil")
}
transactionID, err := x.TransactionId.toDomain()
if err != nil {
return nil, err
}
return &appmessage.Outpoint{
TxID: *transactionID,
Index: x.Index,
}, nil
}
func (x *UtxoEntry) toAppMessage() (*appmessage.UTXOEntry, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "UtxoEntry is nil")
}
scriptPublicKey, err := x.ScriptPublicKey.toAppMessage()
if err != nil {
return nil, err
}
return &appmessage.UTXOEntry{
Amount: x.Amount,
ScriptPublicKey: scriptPublicKey,
BlockBlueScore: x.BlockBlueScore,
IsCoinbase: x.IsCoinbase,
}, nil
}
func (x *ScriptPublicKey) toAppMessage() (*externalapi.ScriptPublicKey, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "ScriptPublicKey is nil")
}
if x.Version > math.MaxUint16 {
return nil, errors.Errorf("ScriptPublicKey version is bigger then uint16.")
}
return &externalapi.ScriptPublicKey{
Script: x.Script,
Version: uint16(x.Version),
}, nil
}
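
The nil checks removed in this hunk all lean on the errorNil sentinel plus errors.Wrap, so callers can detect the condition with errors.Is. A self-contained sketch of that pattern, with Hash as a stand-in struct rather than the generated proto type:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errorNil = errors.New("a required field is nil")

type Hash struct{ Bytes []byte }

func (x *Hash) toDomain() ([]byte, error) {
	if x == nil {
		// Wrapping keeps the sentinel recoverable through errors.Is.
		return nil, errors.Wrap(errorNil, "Hash is nil")
	}
	return x.Bytes, nil
}

func main() {
	var h *Hash // nil proto field, as when a peer omits it
	_, err := h.toDomain()
	fmt.Println(errors.Is(err, errorNil)) // true
}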

View File

@@ -6,36 +6,23 @@ import (
)
func (x *KaspadMessage_Addresses) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "KaspadMessage_Addresses is nil")
}
addressList, err := x.Addresses.toAppMessage()
if err != nil {
return nil, err
}
return &appmessage.MsgAddresses{
AddressList: addressList,
}, nil
}
func (x *AddressesMessage) toAppMessage() ([]*appmessage.NetAddress, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "AddressesMessage is nil")
}
if len(x.AddressList) > appmessage.MaxAddressesPerMsg {
protoAddresses := x.Addresses
if len(x.Addresses.AddressList) > appmessage.MaxAddressesPerMsg {
return nil, errors.Errorf("too many addresses for message "+
"[count %d, max %d]", len(x.AddressList), appmessage.MaxAddressesPerMsg)
"[count %d, max %d]", len(x.Addresses.AddressList), appmessage.MaxAddressesPerMsg)
}
addressList := make([]*appmessage.NetAddress, len(x.AddressList))
for i, address := range x.AddressList {
addressList := make([]*appmessage.NetAddress, len(protoAddresses.AddressList))
for i, address := range protoAddresses.AddressList {
var err error
addressList[i], err = address.toAppMessage()
if err != nil {
return nil, err
}
}
return addressList, nil
return &appmessage.MsgAddresses{
AddressList: addressList,
}, nil
}
func (x *KaspadMessage_Addresses) fromAppMessage(msgAddresses *appmessage.MsgAddresses) error {

View File

@@ -6,9 +6,6 @@ import (
)
func (x *KaspadMessage_Block) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "KaspadMessage_Block is nil")
}
return x.Block.toAppMessage()
}
@@ -18,14 +15,21 @@ func (x *KaspadMessage_Block) fromAppMessage(msgBlock *appmessage.MsgBlock) erro
}
func (x *BlockMessage) toAppMessage() (*appmessage.MsgBlock, error) {
if x == nil {
return nil, errors.Wrap(errorNil, "BlockMessage is nil")
}
if len(x.Transactions) > appmessage.MaxTxPerBlock {
return nil, errors.Errorf("too many transactions to fit into a block "+
"[count %d, max %d]", len(x.Transactions), appmessage.MaxTxPerBlock)
}
protoBlockHeader := x.Header
if protoBlockHeader == nil {
return nil, errors.New("block header field cannot be nil")
}
if len(protoBlockHeader.ParentHashes) > appmessage.MaxBlockParents {
return nil, errors.Errorf("block header has %d parents, but the maximum allowed amount "+
"is %d", len(protoBlockHeader.ParentHashes), appmessage.MaxBlockParents)
}
header, err := x.Header.toAppMessage()
if err != nil {
return nil, err

View File

@@ -1,29 +1,10 @@
package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/pkg/errors"
)
import "github.com/kaspanet/kaspad/app/appmessage"
func (x *KaspadMessage_BlockHeaders) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_BlockHeaders is nil")
}
blockHeaders, err := x.BlockHeaders.toAppMessage()
if err != nil {
return nil, err
}
return &appmessage.BlockHeadersMessage{
BlockHeaders: blockHeaders,
}, nil
}
func (x *BlockHeadersMessage) toAppMessage() ([]*appmessage.MsgBlockHeader, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "BlockHeadersMessage is nil")
}
blockHeaders := make([]*appmessage.MsgBlockHeader, len(x.BlockHeaders))
for i, blockHeader := range x.BlockHeaders {
blockHeaders := make([]*appmessage.MsgBlockHeader, len(x.BlockHeaders.BlockHeaders))
for i, blockHeader := range x.BlockHeaders.BlockHeaders {
var err error
blockHeaders[i], err = blockHeader.toAppMessage()
if err != nil {
@@ -31,7 +12,9 @@ func (x *BlockHeadersMessage) toAppMessage() ([]*appmessage.MsgBlockHeader, erro
}
}
return blockHeaders, nil
return &appmessage.BlockHeadersMessage{
BlockHeaders: blockHeaders,
}, nil
}
func (x *KaspadMessage_BlockHeaders) fromAppMessage(blockHeadersMessage *appmessage.BlockHeadersMessage) error {

View File

@@ -2,32 +2,21 @@ package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
func (x *KaspadMessage_BlockLocator) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_BlockLocator is nil")
if len(x.BlockLocator.Hashes) > appmessage.MaxBlockLocatorsPerMsg {
return nil, errors.Errorf("too many block locator hashes for message "+
"[count %d, max %d]", len(x.BlockLocator.Hashes), appmessage.MaxBlockLocatorsPerMsg)
}
hashes, err := x.BlockLocator.toAppMessage()
hashes, err := protoHashesToDomain(x.BlockLocator.Hashes)
if err != nil {
return nil, err
}
return &appmessage.MsgBlockLocator{BlockLocatorHashes: hashes}, nil
}
func (x *BlockLocatorMessage) toAppMessage() ([]*externalapi.DomainHash, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "BlockLocatorMessage is nil")
}
if len(x.Hashes) > appmessage.MaxBlockLocatorsPerMsg {
return nil, errors.Errorf("too many block locator hashes for message "+
"[count %d, max %d]", len(x.Hashes), appmessage.MaxBlockLocatorsPerMsg)
}
return protoHashesToDomain(x.Hashes)
}
func (x *KaspadMessage_BlockLocator) fromAppMessage(msgBlockLocator *appmessage.MsgBlockLocator) error {
if len(msgBlockLocator.BlockLocatorHashes) > appmessage.MaxBlockLocatorsPerMsg {
return errors.Errorf("too many block locator hashes for message "+

View File

@@ -1,14 +1,8 @@
package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/pkg/errors"
)
import "github.com/kaspanet/kaspad/app/appmessage"
func (x *KaspadMessage_DoneHeaders) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_DoneHeaders is nil")
}
return &appmessage.MsgDoneHeaders{}, nil
}

View File

@@ -1,14 +1,8 @@
package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/pkg/errors"
)
import "github.com/kaspanet/kaspad/app/appmessage"
func (x *KaspadMessage_DonePruningPointUtxoSetChunks) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_DonePruningPointUtxoSetChunks is nil")
}
return &appmessage.MsgDonePruningPointUTXOSetChunks{}, nil
}

View File

@@ -8,9 +8,6 @@ import (
)
func (x *BlockHeaderMessage) toAppMessage() (*appmessage.MsgBlockHeader, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "BlockHeaderMessage is nil")
}
if len(x.ParentHashes) > appmessage.MaxBlockParents {
return nil, errors.Errorf("block header has %d parents, but the maximum allowed amount "+
"is %d", len(x.ParentHashes), appmessage.MaxBlockParents)
@@ -20,14 +17,17 @@ func (x *BlockHeaderMessage) toAppMessage() (*appmessage.MsgBlockHeader, error)
if err != nil {
return nil, err
}
hashMerkleRoot, err := x.HashMerkleRoot.toDomain()
if err != nil {
return nil, err
}
acceptedIDMerkleRoot, err := x.AcceptedIdMerkleRoot.toDomain()
if err != nil {
return nil, err
}
utxoCommitment, err := x.UtxoCommitment.toDomain()
if err != nil {
return nil, err

View File

@@ -1,14 +1,8 @@
package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/pkg/errors"
)
import "github.com/kaspanet/kaspad/app/appmessage"
func (x *KaspadMessage_IbdBlock) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_IbdBlock is nil")
}
msgBlock, err := x.IbdBlock.toAppMessage()
if err != nil {
return nil, err

View File

@@ -2,25 +2,14 @@ package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/pkg/errors"
)
func (x *KaspadMessage_IbdBlockLocator) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_IbdBlockLocator is nil")
}
return x.IbdBlockLocator.toAppMessage()
}
func (x *IbdBlockLocatorMessage) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "IbdBlockLocatorMessage is nil")
}
targetHash, err := x.TargetHash.toDomain()
targetHash, err := x.IbdBlockLocator.TargetHash.toDomain()
if err != nil {
return nil, err
}
blockLocatorHash, err := protoHashesToDomain(x.BlockLocatorHashes)
blockLocatorHash, err := protoHashesToDomain(x.IbdBlockLocator.BlockLocatorHashes)
if err != nil {
return nil, err
}

View File

@@ -1,22 +1,9 @@
package protowire
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/pkg/errors"
)
import "github.com/kaspanet/kaspad/app/appmessage"
func (x *KaspadMessage_IbdBlockLocatorHighestHash) toAppMessage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "KaspadMessage_IbdBlockLocatorHighestHash is nil")
}
return x.IbdBlockLocatorHighestHash.toAppMessgage()
}
func (x *IbdBlockLocatorHighestHashMessage) toAppMessgage() (appmessage.Message, error) {
if x == nil {
return nil, errors.Wrapf(errorNil, "IbdBlockLocatorHighestHashMessage is nil")
}
highestHash, err := x.HighestHash.toDomain()
highestHash, err := x.IbdBlockLocatorHighestHash.HighestHash.toDomain()
if err != nil {
return nil, err
}
@@ -24,7 +11,6 @@ func (x *IbdBlockLocatorHighestHashMessage) toAppMessgage() (appmessage.Message,
return &appmessage.MsgIBDBlockLocatorHighestHash{
HighestHash: highestHash,
}, nil
}
func (x *KaspadMessage_IbdBlockLocatorHighestHash) fromAppMessage(message *appmessage.MsgIBDBlockLocatorHighestHash) error {

Some files were not shown because too many files have changed in this diff.