(#DEV-14) Removed segwit from blockchain package

Mike Zak 2018-06-10 13:47:28 +03:00
parent ffdb76ecec
commit cea07f3b98
23 changed files with 138 additions and 624 deletions

View File

@ -58,29 +58,27 @@ type orphanBlock struct {
// However, the returned snapshot must be treated as immutable since it is
// shared by all callers.
type BestState struct {
Hash chainhash.Hash // The hash of the block.
Height int32 // The height of the block.
Bits uint32 // The difficulty bits of the block.
BlockSize uint64 // The size of the block.
BlockWeight uint64 // The weight of the block.
NumTxns uint64 // The number of txns in the block.
TotalTxns uint64 // The total number of txns in the chain.
MedianTime time.Time // Median time as per CalcPastMedianTime.
Hash chainhash.Hash // The hash of the block.
Height int32 // The height of the block.
Bits uint32 // The difficulty bits of the block.
BlockSize uint64 // The size of the block.
NumTxns uint64 // The number of txns in the block.
TotalTxns uint64 // The total number of txns in the chain.
MedianTime time.Time // Median time as per CalcPastMedianTime.
}
// newBestState returns a new best state instance for the given parameters.
func newBestState(node *blockNode, blockSize, blockWeight, numTxns,
func newBestState(node *blockNode, blockSize, numTxns,
totalTxns uint64, medianTime time.Time) *BestState {
return &BestState{
Hash: node.hash,
Height: node.height,
Bits: node.bits,
BlockSize: blockSize,
BlockWeight: blockWeight,
NumTxns: numTxns,
TotalTxns: totalTxns,
MedianTime: medianTime,
Hash: node.hash,
Height: node.height,
Bits: node.bits,
BlockSize: blockSize,
NumTxns: numTxns,
TotalTxns: totalTxns,
MedianTime: medianTime,
}
}
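With BlockWeight dropped from BestState, callers that inspect the chain tip only see the serialized size. A minimal, hypothetical caller-side sketch (BestSnapshot is the package's existing accessor; the helper and variable names are illustrative, not part of this commit):
package example

import (
	"fmt"

	"github.com/daglabs/btcd/blockchain"
)

// printTip is a hypothetical helper: after this commit the snapshot carries
// BlockSize but no BlockWeight field.
func printTip(chain *blockchain.BlockChain) {
	snapshot := chain.BestSnapshot()
	fmt.Printf("tip %s at height %d, %d bytes, %d txns\n",
		snapshot.Hash, snapshot.Height, snapshot.BlockSize, snapshot.NumTxns)
}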
@ -99,7 +97,6 @@ type BlockChain struct {
timeSource MedianTimeSource
sigCache *txscript.SigCache
indexManager IndexManager
hashCache *txscript.HashCache
// The following fields are calculated based upon the provided chain
// parameters. They are also set when the instance is created and
@ -600,8 +597,7 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U
b.stateLock.RUnlock()
numTxns := uint64(len(block.MsgBlock().Transactions))
blockSize := uint64(block.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(block))
state := newBestState(node, blockSize, blockWeight, numTxns,
state := newBestState(node, blockSize, numTxns,
curTotalTxns+numTxns, node.CalcPastMedianTime())
// Atomically insert info into the database.
@ -712,9 +708,8 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view
b.stateLock.RUnlock()
numTxns := uint64(len(prevBlock.MsgBlock().Transactions))
blockSize := uint64(prevBlock.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(prevBlock))
newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions))
state := newBestState(prevNode, blockSize, blockWeight, numTxns,
state := newBestState(prevNode, blockSize, numTxns,
newTotalTxns, prevNode.CalcPastMedianTime())
err = b.db.Update(func(dbTx database.Tx) error {
@ -1629,16 +1624,6 @@ type Config struct {
// This field can be nil if the caller does not wish to make use of an
// index manager.
IndexManager IndexManager
// HashCache defines a transaction hash mid-state cache to use when
// validating transactions. This cache has the potential to greatly
// speed up transaction validation as re-using the pre-calculated
// mid-state eliminates the O(N^2) validation complexity due to the
// SigHashAll flag.
//
// This field can be nil if the caller is not interested in using a
// signature cache.
HashCache *txscript.HashCache
}
// New returns a BlockChain instance using the provided configuration details.
@ -1688,7 +1673,6 @@ func New(config *Config) (*BlockChain, error) {
maxRetargetTimespan: targetTimespan * adjustmentFactor,
blocksPerRetarget: int32(targetTimespan / targetTimePerBlock),
index: newBlockIndex(config.DB, params),
hashCache: config.HashCache,
bestChain: newChainView(nil),
orphans: make(map[chainhash.Hash]*orphanBlock),
prevOrphans: make(map[chainhash.Hash][]*orphanBlock),

View File

@ -987,8 +987,7 @@ func (b *BlockChain) createChainState() error {
// genesis block, use its timestamp for the median time.
numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
blockSize := uint64(genesisBlock.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(genesisBlock))
b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns,
b.stateSnapshot = newBestState(node, blockSize, numTxns,
numTxns, time.Unix(node.timestamp, 0))
// Create the initial database chain state including creating the
@ -1192,10 +1191,8 @@ func (b *BlockChain) initChainState() error {
// Initialize the state related to the best block.
blockSize := uint64(len(blockBytes))
blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block)))
numTxns := uint64(len(block.Transactions))
b.stateSnapshot = newBestState(tip, blockSize, blockWeight,
numTxns, state.totalTxns, tip.CalcPastMedianTime())
b.stateSnapshot = newBestState(tip, blockSize, numTxns, state.totalTxns, tip.CalcPastMedianTime())
return nil
})

View File

@ -41,10 +41,6 @@ const (
// maximum allowed size.
ErrBlockTooBig
// ErrBlockWeightTooHigh indicates that the block's computed weight
// metric exceeds the maximum allowed value.
ErrBlockWeightTooHigh
// ErrBlockVersionTooOld indicates the block version is too old and is
// no longer accepted since the majority of the network has upgraded
// to a newer version.
@ -195,20 +191,6 @@ const (
// the stack.
ErrScriptValidation
// ErrUnexpectedWitness indicates that a block includes transactions
// with witness data, but doesn't also have a witness commitment within
// the coinbase transaction.
ErrUnexpectedWitness
// ErrInvalidWitnessCommitment indicates that a block's witness
// commitment is not well formed.
ErrInvalidWitnessCommitment
// ErrWitnessCommitmentMismatch indicates that the witness commitment
// included in the block's coinbase transaction doesn't match the
// manually computed witness commitment.
ErrWitnessCommitmentMismatch
// ErrPreviousBlockUnknown indicates that the previous block is not known.
ErrPreviousBlockUnknown
@ -224,49 +206,45 @@ const (
// Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrBlockWeightTooHigh: "ErrBlockWeightTooHigh",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew",
ErrDifficultyTooLow: "ErrDifficultyTooLow",
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrForkTooOld: "ErrForkTooOld",
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
ErrNoTxOutputs: "ErrNoTxOutputs",
ErrTxTooBig: "ErrTxTooBig",
ErrBadTxOutValue: "ErrBadTxOutValue",
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
ErrImmatureSpend: "ErrImmatureSpend",
ErrSpendTooHigh: "ErrSpendTooHigh",
ErrBadFees: "ErrBadFees",
ErrTooManySigOps: "ErrTooManySigOps",
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
ErrMultipleCoinbases: "ErrMultipleCoinbases",
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrUnexpectedWitness: "ErrUnexpectedWitness",
ErrInvalidWitnessCommitment: "ErrInvalidWitnessCommitment",
ErrWitnessCommitmentMismatch: "ErrWitnessCommitmentMismatch",
ErrPreviousBlockUnknown: "ErrPreviousBlockUnknown",
ErrInvalidAncestorBlock: "ErrInvalidAncestorBlock",
ErrPrevBlockNotBest: "ErrPrevBlockNotBest",
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew",
ErrDifficultyTooLow: "ErrDifficultyTooLow",
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrHighHash: "ErrHighHash",
ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrBadCheckpoint: "ErrBadCheckpoint",
ErrForkTooOld: "ErrForkTooOld",
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs",
ErrNoTxOutputs: "ErrNoTxOutputs",
ErrTxTooBig: "ErrTxTooBig",
ErrBadTxOutValue: "ErrBadTxOutValue",
ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrBadTxInput: "ErrBadTxInput",
ErrMissingTxOut: "ErrMissingTxOut",
ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrDuplicateTx: "ErrDuplicateTx",
ErrOverwriteTx: "ErrOverwriteTx",
ErrImmatureSpend: "ErrImmatureSpend",
ErrSpendTooHigh: "ErrSpendTooHigh",
ErrBadFees: "ErrBadFees",
ErrTooManySigOps: "ErrTooManySigOps",
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
ErrMultipleCoinbases: "ErrMultipleCoinbases",
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrPreviousBlockUnknown: "ErrPreviousBlockUnknown",
ErrInvalidAncestorBlock: "ErrInvalidAncestorBlock",
ErrPrevBlockNotBest: "ErrPrevBlockNotBest",
}
// String returns the ErrorCode as a human-readable name.

View File

@ -16,7 +16,6 @@ func TestErrorCodeStringer(t *testing.T) {
}{
{ErrDuplicateBlock, "ErrDuplicateBlock"},
{ErrBlockTooBig, "ErrBlockTooBig"},
{ErrBlockWeightTooHigh, "ErrBlockWeightTooHigh"},
{ErrBlockVersionTooOld, "ErrBlockVersionTooOld"},
{ErrInvalidTime, "ErrInvalidTime"},
{ErrTimeTooOld, "ErrTimeTooOld"},
@ -52,9 +51,6 @@ func TestErrorCodeStringer(t *testing.T) {
{ErrBadCoinbaseHeight, "ErrBadCoinbaseHeight"},
{ErrScriptMalformed, "ErrScriptMalformed"},
{ErrScriptValidation, "ErrScriptValidation"},
{ErrUnexpectedWitness, "ErrUnexpectedWitness"},
{ErrInvalidWitnessCommitment, "ErrInvalidWitnessCommitment"},
{ErrWitnessCommitmentMismatch, "ErrWitnessCommitmentMismatch"},
{ErrPreviousBlockUnknown, "ErrPreviousBlockUnknown"},
{ErrInvalidAncestorBlock, "ErrInvalidAncestorBlock"},
{ErrPrevBlockNotBest, "ErrPrevBlockNotBest"},

View File

@ -309,7 +309,7 @@ func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash {
for _, tx := range txns {
utilTxns = append(utilTxns, btcutil.NewTx(tx))
}
merkles := blockchain.BuildMerkleTreeStore(utilTxns, false)
merkles := blockchain.BuildMerkleTreeStore(utilTxns)
return *merkles[len(merkles)-1]
}

View File

@ -51,18 +51,6 @@ const (
// hash.
addrKeyTypeScriptHash = 1
// addrKeyTypeWitnessPubKeyHash is the address type in an address key which
// represents a pay-to-witness-pubkey-hash address. This is required
// as the 20-byte data push of a p2wkh witness program may be the same
// data push used by a p2pkh address.
addrKeyTypeWitnessPubKeyHash = 2
// addrKeyTypeWitnessScriptHash is the address type in an address key which
// represents a pay-to-witness-script-hash address. This is required,
// as p2wsh are distinct from p2sh addresses since they use a new
// script template, as well as a 32-byte data push.
addrKeyTypeWitnessScriptHash = 3
// Size of a transaction entry. It consists of 4 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 4 + 4 + 4
@ -546,24 +534,6 @@ func addrToKey(addr btcutil.Address) ([addrKeySize]byte, error) {
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
return result, nil
case *btcutil.AddressWitnessScriptHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeWitnessScriptHash
// P2WSH outputs utilize a 32-byte data push created by hashing
// the script with sha256 instead of hash160. In order to keep
// all address entries within the database uniform and compact,
// we use a hash160 here to reduce the size of the salient data
// push to 20-bytes.
copy(result[1:], btcutil.Hash160(addr.ScriptAddress()))
return result, nil
case *btcutil.AddressWitnessPubKeyHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeWitnessPubKeyHash
copy(result[1:], addr.Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType

View File

@ -5,42 +5,12 @@
package blockchain
import (
"bytes"
"fmt"
"math"
"github.com/daglabs/btcd/chaincfg/chainhash"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcutil"
)
const (
// CoinbaseWitnessDataLen is the required length of the only element within
// the coinbase's witness data if the coinbase transaction contains a
// witness commitment.
CoinbaseWitnessDataLen = 32
// CoinbaseWitnessPkScriptLength is the length of the public key script
// containing an OP_RETURN, the WitnessMagicBytes, and the witness
// commitment itself. In order to be a valid candidate for the output
// containing the witness commitment, a script must be at least this long.
CoinbaseWitnessPkScriptLength = 38
)
var (
// WitnessMagicBytes is the prefix marker within the public key script
// of a coinbase output to indicate that this output holds the witness
// commitment for a block.
WitnessMagicBytes = []byte{
txscript.OP_RETURN,
txscript.OP_DATA_36,
0xaa,
0x21,
0xa9,
0xed,
}
)
// nextPowerOfTwo returns the next highest power of two from a given number if
// it is not already a power of two. This is a helper function used during the
// calculation of a merkle tree.
@ -96,12 +66,7 @@ func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.
// are calculated by concatenating the left node with itself before hashing.
// Since this function uses nodes that are pointers to the hashes, empty nodes
// will be nil.
//
// The additional bool parameter indicates if we are generating the merkle tree
// using witness transaction id's rather than regular transaction id's. This
// also presents an additional case wherein the wtxid of the coinbase transaction
// is the zeroHash.
func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash.Hash {
func BuildMerkleTreeStore(transactions []*btcutil.Tx) []*chainhash.Hash {
// Calculate how many entries are required to hold the binary merkle
// tree as a linear array and create an array of that size.
nextPoT := nextPowerOfTwo(len(transactions))
@ -110,21 +75,7 @@ func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash
// Create the base transaction hashes and populate the array with them.
for i, tx := range transactions {
// If we're computing a witness merkle root, instead of the
// regular txid, we use the modified wtxid which includes a
// transaction's witness data within the digest. Additionally,
// the coinbase's wtxid is all zeroes.
switch {
case witness && i == 0:
var zeroHash chainhash.Hash
merkles[i] = &zeroHash
case witness:
wSha := tx.MsgTx().WitnessHash()
merkles[i] = &wSha
default:
merkles[i] = tx.Hash()
}
merkles[i] = tx.Hash()
}
// Start the array offset after the last transaction and adjusted to the
@ -153,113 +104,3 @@ func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash
return merkles
}
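A minimal usage sketch of the simplified API, mirroring the calcMerkleRoot hunk earlier in this commit (the helper name is hypothetical): the witness flag is gone and the merkle root is the last entry of the returned store.
package example

import (
	"github.com/daglabs/btcd/blockchain"
	"github.com/daglabs/btcd/chaincfg/chainhash"
	"github.com/daglabs/btcutil"
)

// merkleRoot is a hypothetical helper: build the store from the block's
// transactions only, then read the root from the final slot.
func merkleRoot(block *btcutil.Block) chainhash.Hash {
	merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
	return *merkles[len(merkles)-1]
}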
// ExtractWitnessCommitment attempts to locate, and return the witness
// commitment for a block. The witness commitment is of the form:
// double-SHA256(witness root || witness nonce). The function additionally returns a
// boolean indicating if the witness root was located within any of the txOut's
// in the passed transaction. The witness commitment is stored as the data push
// for an OP_RETURN with special magic bytes to aide in location.
func ExtractWitnessCommitment(tx *btcutil.Tx) ([]byte, bool) {
// The witness commitment *must* be located within one of the coinbase
// transaction's outputs.
if !IsCoinBase(tx) {
return nil, false
}
msgTx := tx.MsgTx()
for i := len(msgTx.TxOut) - 1; i >= 0; i-- {
// The public key script that contains the witness commitment
// must share a prefix with the WitnessMagicBytes, and be at
// least 38 bytes.
pkScript := msgTx.TxOut[i].PkScript
if len(pkScript) >= CoinbaseWitnessPkScriptLength &&
bytes.HasPrefix(pkScript, WitnessMagicBytes) {
// The witness commitment itself is a 32-byte hash
// directly after the WitnessMagicBytes. The remaining
// bytes beyond the 38th byte currently have no consensus
// meaning.
start := len(WitnessMagicBytes)
end := CoinbaseWitnessPkScriptLength
return msgTx.TxOut[i].PkScript[start:end], true
}
}
return nil, false
}
// ValidateWitnessCommitment validates the witness commitment (if any) found
// within the coinbase transaction of the passed block.
func ValidateWitnessCommitment(blk *btcutil.Block) error {
// If the block doesn't have any transactions at all, then we won't be
// able to extract a commitment from the non-existent coinbase
// transaction. So we exit early here.
if len(blk.Transactions()) == 0 {
str := "cannot validate witness commitment of block without " +
"transactions"
return ruleError(ErrNoTransactions, str)
}
coinbaseTx := blk.Transactions()[0]
if len(coinbaseTx.MsgTx().TxIn) == 0 {
return ruleError(ErrNoTxInputs, "transaction has no inputs")
}
witnessCommitment, witnessFound := ExtractWitnessCommitment(coinbaseTx)
// If we can't find a witness commitment in any of the coinbase's
// outputs, then the block MUST NOT contain any transactions with
// witness data.
if !witnessFound {
for _, tx := range blk.Transactions() {
msgTx := tx.MsgTx()
if msgTx.HasWitness() {
str := fmt.Sprintf("block contains transaction with witness" +
" data, yet no witness commitment present")
return ruleError(ErrUnexpectedWitness, str)
}
}
return nil
}
// At this point the block contains a witness commitment, so the
// coinbase transaction MUST have exactly one witness element within
// its witness data and that element must be exactly
// CoinbaseWitnessDataLen bytes.
coinbaseWitness := coinbaseTx.MsgTx().TxIn[0].Witness
if len(coinbaseWitness) != 1 {
str := fmt.Sprintf("the coinbase transaction has %d items in "+
"its witness stack when only one is allowed",
len(coinbaseWitness))
return ruleError(ErrInvalidWitnessCommitment, str)
}
witnessNonce := coinbaseWitness[0]
if len(witnessNonce) != CoinbaseWitnessDataLen {
str := fmt.Sprintf("the coinbase transaction witness nonce "+
"has %d bytes when it must be %d bytes",
len(witnessNonce), CoinbaseWitnessDataLen)
return ruleError(ErrInvalidWitnessCommitment, str)
}
// Finally, with the preliminary checks out of the way, we can check if
// the extracted witnessCommitment is equal to:
// SHA256(witnessMerkleRoot || witnessNonce). Where witnessNonce is the
// coinbase transaction's only witness item.
witnessMerkleTree := BuildMerkleTreeStore(blk.Transactions(), true)
witnessMerkleRoot := witnessMerkleTree[len(witnessMerkleTree)-1]
var witnessPreimage [chainhash.HashSize * 2]byte
copy(witnessPreimage[:], witnessMerkleRoot[:])
copy(witnessPreimage[chainhash.HashSize:], witnessNonce)
computedCommitment := chainhash.DoubleHashB(witnessPreimage[:])
if !bytes.Equal(computedCommitment, witnessCommitment) {
str := fmt.Sprintf("witness commitment does not match: "+
"computed %v, coinbase includes %v", computedCommitment,
witnessCommitment)
return ruleError(ErrWitnessCommitmentMismatch, str)
}
return nil
}

View File

@ -13,7 +13,7 @@ import (
// TestMerkle tests the BuildMerkleTreeStore API.
func TestMerkle(t *testing.T) {
block := btcutil.NewBlock(&Block100000)
merkles := BuildMerkleTreeStore(block.Transactions(), false)
merkles := BuildMerkleTreeStore(block.Transactions())
calculatedMerkleRoot := merkles[len(merkles)-1]
wantMerkle := &Block100000.Header.MerkleRoot
if !wantMerkle.IsEqual(calculatedMerkleRoot) {

View File

@ -20,7 +20,6 @@ type txValidateItem struct {
txInIndex int
txIn *wire.TxIn
tx *btcutil.Tx
sigHashes *txscript.TxSigHashes
}
// txValidator provides a type which asynchronously validates transaction
@ -33,7 +32,6 @@ type txValidator struct {
utxoView *UtxoViewpoint
flags txscript.ScriptFlags
sigCache *txscript.SigCache
hashCache *txscript.HashCache
}
// sendResult sends the result of a script pair validation on the internal
@ -71,20 +69,17 @@ out:
// Create a new script engine for the script pair.
sigScript := txIn.SignatureScript
witness := txIn.Witness
pkScript := utxo.PkScript()
inputAmount := utxo.Amount()
vm, err := txscript.NewEngine(pkScript, txVI.tx.MsgTx(),
txVI.txInIndex, v.flags, v.sigCache, txVI.sigHashes,
inputAmount)
txVI.txInIndex, v.flags, v.sigCache, &txscript.TxSigHashes{},
utxo.Amount())
if err != nil {
str := fmt.Sprintf("failed to parse input "+
"%s:%d which references output %v - "+
"%v (input witness %x, input script "+
"bytes %x, prev output script bytes %x)",
"%v (input script bytes %x, prev "+
"output script bytes %x)",
txVI.tx.Hash(), txVI.txInIndex,
txIn.PreviousOutPoint, err, witness,
sigScript, pkScript)
txIn.PreviousOutPoint, err, sigScript, pkScript)
err := ruleError(ErrScriptMalformed, str)
v.sendResult(err)
break out
@ -94,11 +89,10 @@ out:
if err := vm.Execute(); err != nil {
str := fmt.Sprintf("failed to validate input "+
"%s:%d which references output %v - "+
"%v (input witness %x, input script "+
"bytes %x, prev output script bytes %x)",
"%v (input script bytes %x, prev output "+
"script bytes %x)",
txVI.tx.Hash(), txVI.txInIndex,
txIn.PreviousOutPoint, err, witness,
sigScript, pkScript)
txIn.PreviousOutPoint, err, sigScript, pkScript)
err := ruleError(ErrScriptValidation, str)
v.sendResult(err)
break out
@ -173,47 +167,20 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
// newTxValidator returns a new instance of txValidator to be used for
// validating transaction scripts asynchronously.
func newTxValidator(utxoView *UtxoViewpoint, flags txscript.ScriptFlags,
sigCache *txscript.SigCache, hashCache *txscript.HashCache) *txValidator {
func newTxValidator(utxoView *UtxoViewpoint, flags txscript.ScriptFlags, sigCache *txscript.SigCache) *txValidator {
return &txValidator{
validateChan: make(chan *txValidateItem),
quitChan: make(chan struct{}),
resultChan: make(chan error),
utxoView: utxoView,
sigCache: sigCache,
hashCache: hashCache,
flags: flags,
}
}
// ValidateTransactionScripts validates the scripts for the passed transaction
// using multiple goroutines.
func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint,
flags txscript.ScriptFlags, sigCache *txscript.SigCache,
hashCache *txscript.HashCache) error {
// First determine if segwit is active according to the scriptFlags. If
// it isn't then we don't need to interact with the HashCache.
segwitActive := flags&txscript.ScriptVerifyWitness == txscript.ScriptVerifyWitness
// If the hashcache doesn't yet have the sighash midstate for this
// transaction, then we'll compute them now so we can re-use them
// amongst all worker validation goroutines.
if segwitActive && tx.MsgTx().HasWitness() &&
!hashCache.ContainsHashes(tx.Hash()) {
hashCache.AddSigHashes(tx.MsgTx())
}
var cachedHashes *txscript.TxSigHashes
if segwitActive && tx.MsgTx().HasWitness() {
// The same pointer to the transaction's sighash midstate will
// be re-used amongst all validation goroutines. By
// pre-computing the sighash here instead of during validation,
// we ensure the sighashes
// are only computed once.
cachedHashes, _ = hashCache.GetSigHashes(tx.Hash())
}
func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
// Collect all of the transaction inputs and required information for
// validation.
txIns := tx.MsgTx().TxIn
@ -228,26 +195,18 @@ func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint,
txInIndex: txInIdx,
txIn: txIn,
tx: tx,
sigHashes: cachedHashes,
}
txValItems = append(txValItems, txVI)
}
// Validate all of the inputs.
validator := newTxValidator(utxoView, flags, sigCache, hashCache)
validator := newTxValidator(utxoView, flags, sigCache)
return validator.Validate(txValItems)
}
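A hypothetical caller sketch of the new signature, consistent with the mempool hunk later in this commit: the HashCache argument is gone, so only the transaction, UTXO view, script flags, and signature cache are passed.
package example

import (
	"github.com/daglabs/btcd/blockchain"
	"github.com/daglabs/btcd/txscript"
	"github.com/daglabs/btcutil"
)

// checkTx is a hypothetical wrapper around the simplified call.
func checkTx(tx *btcutil.Tx, view *blockchain.UtxoViewpoint,
	sigCache *txscript.SigCache) error {

	return blockchain.ValidateTransactionScripts(tx, view,
		txscript.StandardVerifyFlags, sigCache)
}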
// checkBlockScripts executes and validates the scripts for all transactions in
// the passed block using multiple goroutines.
func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint,
scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache,
hashCache *txscript.HashCache) error {
// First determine if segwit is active according to the scriptFlags. If
// it isn't then we don't need to interact with the HashCache.
segwitActive := scriptFlags&txscript.ScriptVerifyWitness == txscript.ScriptVerifyWitness
func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
// Collect all of the transaction inputs and required information for
// validation for all transactions in the block into a single slice.
numInputs := 0
@ -256,28 +215,6 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint,
}
txValItems := make([]*txValidateItem, 0, numInputs)
for _, tx := range block.Transactions() {
hash := tx.Hash()
// If the HashCache is present, and it doesn't yet contain the
// partial sighashes for this transaction, then we add the
// sighashes for the transaction. This allows us to take
// advantage of the potential speed savings due to the new
// digest algorithm (BIP0143).
if segwitActive && tx.HasWitness() && hashCache != nil &&
!hashCache.ContainsHashes(hash) {
hashCache.AddSigHashes(tx.MsgTx())
}
var cachedHashes *txscript.TxSigHashes
if segwitActive && tx.HasWitness() {
if hashCache != nil {
cachedHashes, _ = hashCache.GetSigHashes(hash)
} else {
cachedHashes = txscript.NewTxSigHashes(tx.MsgTx())
}
}
for txInIdx, txIn := range tx.MsgTx().TxIn {
// Skip coinbases.
if txIn.PreviousOutPoint.Index == math.MaxUint32 {
@ -288,14 +225,13 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint,
txInIndex: txInIdx,
txIn: txIn,
tx: tx,
sigHashes: cachedHashes,
}
txValItems = append(txValItems, txVI)
}
}
// Validate all of the inputs.
validator := newTxValidator(utxoView, scriptFlags, sigCache, hashCache)
validator := newTxValidator(utxoView, scriptFlags, sigCache)
start := time.Now()
if err := validator.Validate(txValItems); err != nil {
return err
@ -304,16 +240,5 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint,
log.Tracef("block %v took %v to verify", block.Hash(), elapsed)
// If the HashCache is present, once we have validated the block, we no
// longer need the cached hashes for these transactions, so we purge
// them from the cache.
if segwitActive && hashCache != nil {
for _, tx := range block.Transactions() {
if tx.MsgTx().HasWitness() {
hashCache.PurgeSigHashes(tx.Hash())
}
}
}
return nil
}

View File

@ -41,7 +41,7 @@ func TestCheckBlockScripts(t *testing.T) {
}
scriptFlags := txscript.ScriptBip16
err = checkBlockScripts(blocks[0], view, scriptFlags, nil, nil)
err = checkBlockScripts(blocks[0], view, scriptFlags, nil)
if err != nil {
t.Errorf("Transaction script validation failed: %v\n", err)
return

View File

@ -45,6 +45,10 @@ const (
// baseSubsidy is the starting subsidy amount for mined blocks. This
// value is halved every SubsidyHalvingInterval blocks.
baseSubsidy = 50 * btcutil.SatoshiPerBitcoin
// MaxOutputsPerBlock is the maximum number of transaction outputs there
// can be in a block of max size.
MaxOutputsPerBlock = wire.MaxBlockPayload / wire.MinTxOutPayload
)
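Assuming wire.MinTxOutPayload is 9 bytes as in upstream btcd (an 8-byte output value plus a 1-byte script-length varint for an empty script), the new constant works out to 1,000,000 / 9 = 111,111 outputs. A quick illustrative sketch with those assumed values:
package example

// Illustrative only, using assumed upstream values:
// MaxBlockPayload = 1000000, MinTxOutPayload = 9.
const (
	maxBlockPayload    = 1000000
	minTxOutPayload    = 9
	maxOutputsPerBlock = maxBlockPayload / minTxOutPayload // 111111
)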
var (
@ -220,10 +224,10 @@ func CheckTransactionSanity(tx *btcutil.Tx) error {
// A transaction must not exceed the maximum allowed block payload when
// serialized.
serializedTxSize := tx.MsgTx().SerializeSizeStripped()
if serializedTxSize > MaxBlockBaseSize {
serializedTxSize := tx.MsgTx().SerializeSize()
if serializedTxSize > wire.MaxBlockPayload {
str := fmt.Sprintf("serialized transaction is too big - got "+
"%d, max %d", serializedTxSize, MaxBlockBaseSize)
"%d, max %d", serializedTxSize, wire.MaxBlockPayload)
return ruleError(ErrTxTooBig, str)
}
@ -484,19 +488,19 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
}
// A block must not have more transactions than the max block payload or
// else it is certainly over the weight limit.
if numTx > MaxBlockBaseSize {
// else it is certainly over the block size limit.
if numTx > wire.MaxBlockPayload {
str := fmt.Sprintf("block contains too many transactions - "+
"got %d, max %d", numTx, MaxBlockBaseSize)
"got %d, max %d", numTx, wire.MaxBlockPayload)
return ruleError(ErrBlockTooBig, str)
}
// A block must not exceed the maximum allowed block payload when
// serialized.
serializedSize := msgBlock.SerializeSizeStripped()
if serializedSize > MaxBlockBaseSize {
serializedSize := msgBlock.SerializeSize()
if serializedSize > wire.MaxBlockPayload {
str := fmt.Sprintf("serialized block is too big - got %d, "+
"max %d", serializedSize, MaxBlockBaseSize)
"max %d", serializedSize, wire.MaxBlockPayload)
return ruleError(ErrBlockTooBig, str)
}
@ -531,7 +535,7 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// checks. Bitcoind builds the tree here and checks the merkle root
// after the following checks, but there is no reason not to check the
// merkle root matches here.
merkles := BuildMerkleTreeStore(block.Transactions(), false)
merkles := BuildMerkleTreeStore(block.Transactions())
calculatedMerkleRoot := merkles[len(merkles)-1]
if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) {
str := fmt.Sprintf("block merkle root is invalid - block "+
@ -561,11 +565,11 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// We could potentially overflow the accumulator so check for
// overflow.
lastSigOps := totalSigOps
totalSigOps += (CountSigOps(tx) * WitnessScaleFactor)
if totalSigOps < lastSigOps || totalSigOps > MaxBlockSigOpsCost {
totalSigOps += CountSigOps(tx)
if totalSigOps < lastSigOps || totalSigOps > MaxSigOpsPerBlock {
str := fmt.Sprintf("block contains too many signature "+
"operations - got %v, max %v", totalSigOps,
MaxBlockSigOpsCost)
MaxSigOpsPerBlock)
return ruleError(ErrTooManySigOps, str)
}
}
@ -781,42 +785,6 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode
return err
}
}
// Query for the Version Bits state for the segwit soft-fork
// deployment. If segwit is active, we'll switch over to
// enforcing all the new rules.
segwitState, err := b.deploymentState(prevNode,
chaincfg.DeploymentSegwit)
if err != nil {
return err
}
// If segwit is active, then we'll need to fully validate the
// new witness commitment for adherence to the rules.
if segwitState == ThresholdActive {
// Validate the witness commitment (if any) within the
// block. This involves asserting that if the coinbase
// contains the special commitment output, then this
// merkle root matches a computed merkle root of all
// the wtxid's of the transactions within the block. In
// addition, various other checks against the
// coinbase's witness stack.
if err := ValidateWitnessCommitment(block); err != nil {
return err
}
// Once the witness commitment, witness nonce, and sig
// op cost have been validated, we can finally assert
// that the block's weight doesn't exceed the current
// consensus parameter.
blockWeight := GetBlockWeight(block)
if blockWeight > MaxBlockWeight {
str := fmt.Sprintf("block's weight metric is "+
"too high - got %v, max %v",
blockWeight, MaxBlockWeight)
return ruleError(ErrBlockWeightTooHigh, str)
}
}
}
return nil
@ -1051,15 +1019,6 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// https://en.bitcoin.it/wiki/BIP_0016 for more details.
enforceBIP0016 := node.timestamp >= txscript.Bip16Activation.Unix()
// Query for the Version Bits state for the segwit soft-fork
// deployment. If segwit is active, we'll switch over to enforcing all
// the new rules.
segwitState, err := b.deploymentState(node.parent, chaincfg.DeploymentSegwit)
if err != nil {
return err
}
enforceSegWit := segwitState == ThresholdActive
// The number of signature operations must be less than the maximum
// allowed per block. Note that the preliminary sanity checks on a
// block also include a check similar to this one, but this check
@ -1067,28 +1026,31 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// signature operations in each of the input transaction public key
// scripts.
transactions := block.Transactions()
totalSigOpCost := 0
totalSigOps := 0
for i, tx := range transactions {
// Since the first (and only the first) transaction has
// already been verified to be a coinbase transaction,
// use i == 0 as an optimization for the flag to
// countP2SHSigOps for whether or not the transaction is
// a coinbase transaction rather than having to do a
// full coinbase check again.
sigOpCost, err := GetSigOpCost(tx, i == 0, view, enforceBIP0016,
enforceSegWit)
if err != nil {
return err
numsigOps := CountSigOps(tx)
if enforceBIP0016 {
// Since the first (and only the first) transaction has
// already been verified to be a coinbase transaction,
// use i == 0 as an optimization for the flag to
// countP2SHSigOps for whether or not the transaction is
// a coinbase transaction rather than having to do a
// full coinbase check again.
numP2SHSigOps, err := CountP2SHSigOps(tx, i == 0, view)
if err != nil {
return err
}
numsigOps += numP2SHSigOps
}
// Check for overflow or going over the limits. We have to do
// this on every loop iteration to avoid overflow.
lastSigOpCost := totalSigOpCost
totalSigOpCost += sigOpCost
if totalSigOpCost < lastSigOpCost || totalSigOpCost > MaxBlockSigOpsCost {
lastSigops := totalSigOps
totalSigOps += numsigOps
if totalSigOps < lastSigops || totalSigOps > MaxSigOpsPerBlock {
str := fmt.Sprintf("block contains too many "+
"signature operations - got %v, max %v",
totalSigOpCost, MaxBlockSigOpsCost)
totalSigOps, MaxSigOpsPerBlock)
return ruleError(ErrTooManySigOps, str)
}
}
@ -1216,20 +1178,12 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
}
}
// Enforce the segwit soft-fork package once the soft-fork has shifted
// into the "active" version bits state.
if enforceSegWit {
scriptFlags |= txscript.ScriptVerifyWitness
scriptFlags |= txscript.ScriptStrictMultiSig
}
// Now that the inexpensive checks are done and have passed, verify the
// transactions are actually allowed to spend the coins by running the
// expensive ECDSA signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks.
if runScripts {
err := checkBlockScripts(block, view, scriptFlags, b.sigCache,
b.hashCache)
err := checkBlockScripts(block, view, scriptFlags, b.sigCache)
if err != nil {
return err
}

View File

@ -1,117 +0,0 @@
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"fmt"
"github.com/daglabs/btcd/txscript"
"github.com/daglabs/btcd/wire"
"github.com/daglabs/btcutil"
)
const (
// MaxBlockWeight defines the maximum block weight, where "block
// weight" is interpreted as defined in BIP0141. A block's weight is
// calculated as the sum of the bytes in the existing transactions
// and header, plus the weight of each byte within a transaction. The
// weight of a "base" byte is 4, while the weight of a witness byte is
// 1. As a result, for a block to be valid, the BlockWeight MUST be
// less than, or equal to MaxBlockWeight.
MaxBlockWeight = 4000000
// MaxBlockBaseSize is the maximum number of bytes within a block
// which can be allocated to non-witness data.
MaxBlockBaseSize = 1000000
// MaxBlockSigOpsCost is the maximum number of signature operations
// allowed for a block. It is calculated via a weighted algorithm which
// weights segregated witness sig ops lower than regular sig ops.
MaxBlockSigOpsCost = 80000
// WitnessScaleFactor determines the level of "discount" witness data
// receives compared to "base" data. A scale factor of 4, denotes that
// witness data is 1/4 as cheap as regular non-witness data.
WitnessScaleFactor = 4
// MinTxOutputWeight is the minimum possible weight for a transaction
// output.
MinTxOutputWeight = WitnessScaleFactor * wire.MinTxOutPayload
// MaxOutputsPerBlock is the maximum number of transaction outputs there
// can be in a block of max weight size.
MaxOutputsPerBlock = MaxBlockWeight / MinTxOutputWeight
)
// GetBlockWeight computes the value of the weight metric for a given block.
// Currently the weight metric is simply the sum of the block's serialized size
// without any witness data scaled proportionally by the WitnessScaleFactor,
// and the block's serialized size including any witness data.
func GetBlockWeight(blk *btcutil.Block) int64 {
msgBlock := blk.MsgBlock()
baseSize := msgBlock.SerializeSizeStripped()
totalSize := msgBlock.SerializeSize()
// (baseSize * 3) + totalSize
return int64((baseSize * (WitnessScaleFactor - 1)) + totalSize)
}
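For context on the metric being deleted, a small worked sketch of the formula above with made-up sizes (not taken from any real block):
package example

import "fmt"

// weightSketch is illustrative only: the removed weight formula applied to
// hypothetical sizes.
func weightSketch() {
	const witnessScaleFactor = 4
	baseSize := 900000   // hypothetical serialized size without witness data
	totalSize := 1200000 // hypothetical serialized size including witness data
	weight := baseSize*(witnessScaleFactor-1) + totalSize
	fmt.Println(weight) // 3900000, under the removed MaxBlockWeight of 4000000
}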
// GetTransactionWeight computes the value of the weight metric for a given
// transaction. Currently the weight metric is simply the sum of the
// transaction's serialized size without any witness data scaled
// proportionally by the WitnessScaleFactor, and the transaction's serialized
// size including any witness data.
func GetTransactionWeight(tx *btcutil.Tx) int64 {
msgTx := tx.MsgTx()
baseSize := msgTx.SerializeSizeStripped()
totalSize := msgTx.SerializeSize()
// (baseSize * 3) + totalSize
return int64((baseSize * (WitnessScaleFactor - 1)) + totalSize)
}
// GetSigOpCost returns the unified sig op cost for the passed transaction
// respecting current active soft-forks which modified sig op cost counting.
// The unified sig op cost for a transaction is computed as the sum of: the
// legacy sig op count scaled according to the WitnessScaleFactor, the sig op
// count for all p2sh inputs scaled by the WitnessScaleFactor, and finally the
// unscaled sig op count for any inputs spending witness programs.
func GetSigOpCost(tx *btcutil.Tx, isCoinBaseTx bool, utxoView *UtxoViewpoint, bip16, segWit bool) (int, error) {
numSigOps := CountSigOps(tx) * WitnessScaleFactor
if bip16 {
numP2SHSigOps, err := CountP2SHSigOps(tx, isCoinBaseTx, utxoView)
if err != nil {
return 0, nil
}
numSigOps += (numP2SHSigOps * WitnessScaleFactor)
}
if segWit && !isCoinBaseTx {
msgTx := tx.MsgTx()
for txInIndex, txIn := range msgTx.TxIn {
// Ensure the referenced output is available and hasn't
// already been spent.
utxo := utxoView.LookupEntry(txIn.PreviousOutPoint)
if utxo == nil || utxo.IsSpent() {
str := fmt.Sprintf("output %v referenced from "+
"transaction %s:%d either does not "+
"exist or has already been spent",
txIn.PreviousOutPoint, tx.Hash(),
txInIndex)
return 0, ruleError(ErrMissingTxOut, str)
}
witness := txIn.Witness
sigScript := txIn.SignatureScript
pkScript := utxo.PkScript()
numSigOps += txscript.GetWitnessSigOpCount(sigScript, pkScript, witness)
}
}
return numSigOps, nil
}

View File

@ -21,13 +21,13 @@ import (
"time"
"github.com/btcsuite/go-socks/socks"
"github.com/daglabs/btcd/blockchain"
"github.com/daglabs/btcd/chaincfg"
"github.com/daglabs/btcd/chaincfg/chainhash"
"github.com/daglabs/btcd/connmgr"
"github.com/daglabs/btcd/database"
_ "github.com/daglabs/btcd/database/ffldb"
"github.com/daglabs/btcd/mempool"
"github.com/daglabs/btcd/wire"
"github.com/daglabs/btcutil"
flags "github.com/jessevdk/go-flags"
)
@ -50,7 +50,7 @@ const (
defaultBlockMinSize = 0
defaultBlockMaxSize = 750000
blockMaxSizeMin = 1000
blockMaxSizeMax = blockchain.MaxBlockBaseSize - 1000
blockMaxSizeMax = wire.MaxBlockPayload - 1000
defaultGenerate = false
defaultMaxOrphanTransactions = 100
defaultMaxOrphanTxSize = 100000

View File

@ -181,7 +181,7 @@ func CreateBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx,
if inclusionTxs != nil {
blockTxns = append(blockTxns, inclusionTxs...)
}
merkles := blockchain.BuildMerkleTreeStore(blockTxns, false)
merkles := blockchain.BuildMerkleTreeStore(blockTxns)
var block wire.MsgBlock
block.Header = wire.BlockHeader{
Version: blockVersion,

View File

@ -82,9 +82,6 @@ type Config struct {
// SigCache defines a signature cache to use.
SigCache *txscript.SigCache
// HashCache defines the transaction hash mid-state cache to use.
HashCache *txscript.HashCache
// AddrIndex defines the optional address index instance to use for
// indexing the unconfirmed transactions in the memory pool.
// This can be nil if the address index is not enabled.
@ -125,10 +122,10 @@ type Policy struct {
// of big orphans.
MaxOrphanTxSize int
// MaxSigOpCostPerTx is the cumulative maximum cost of all the signature
// operations in a single transaction we will relay or mine. It is a
// fraction of the max signature operations for a block.
MaxSigOpCostPerTx int
// MaxSigOpsPerTx is the maximum number of signature operations
// in a single transaction we will relay or mine. It is a fraction
// of the max signature operations for a block.
MaxSigOpsPerTx int
// MinRelayTxFee defines the minimum transaction fee in BTC/kB to be
// considered a non-zero fee.
@ -809,16 +806,16 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec
// the coinbase address itself can contain signature operations, the
// maximum allowed signature operations per transaction is less than
// the maximum allowed signature operations per block.
sigOpCost, err := blockchain.GetSigOpCost(tx, false, utxoView, true, false)
sigOpCount, err := blockchain.CountP2SHSigOps(tx, false, utxoView)
if err != nil {
if cerr, ok := err.(blockchain.RuleError); ok {
return nil, nil, chainRuleError(cerr)
}
return nil, nil, err
}
if sigOpCost > mp.cfg.Policy.MaxSigOpCostPerTx {
str := fmt.Sprintf("transaction %v sigop cost is too high: %d > %d",
txHash, sigOpCost, mp.cfg.Policy.MaxSigOpCostPerTx)
if sigOpCount > mp.cfg.Policy.MaxSigOpsPerTx {
str := fmt.Sprintf("transaction %v sigop count is too high: %d > %d",
txHash, sigOpCount, mp.cfg.Policy.MaxSigOpsPerTx)
return nil, nil, txRuleError(wire.RejectNonstandard, str)
}
@ -885,8 +882,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejec
// Verify crypto signatures for each input and reject the transaction if
// any don't verify.
err = blockchain.ValidateTransactionScripts(tx, utxoView,
txscript.StandardVerifyFlags, mp.cfg.SigCache,
mp.cfg.HashCache)
txscript.StandardVerifyFlags, mp.cfg.SigCache)
if err != nil {
if cerr, ok := err.(blockchain.RuleError); ok {
return nil, nil, chainRuleError(cerr)

View File

@ -313,7 +313,7 @@ func newPoolHarness(chainParams *chaincfg.Params) (*poolHarness, []spendableOutp
FreeTxRelayLimit: 15.0,
MaxOrphanTxs: 5,
MaxOrphanTxSize: 1000,
MaxSigOpCostPerTx: blockchain.MaxBlockSigOpsCost / 4,
MaxSigOpsPerTx: blockchain.MaxSigOpsPerBlock / 5,
MinRelayTxFee: 1000, // 1 Satoshi per byte
MaxTxVersion: 1,
},

View File

@ -345,7 +345,6 @@ type BlkTmplGenerator struct {
chain *blockchain.BlockChain
timeSource blockchain.MedianTimeSource
sigCache *txscript.SigCache
hashCache *txscript.HashCache
}
// NewBlkTmplGenerator returns a new block template generator for the given
@ -357,8 +356,7 @@ type BlkTmplGenerator struct {
func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params,
txSource TxSource, chain *blockchain.BlockChain,
timeSource blockchain.MedianTimeSource,
sigCache *txscript.SigCache,
hashCache *txscript.HashCache) *BlkTmplGenerator {
sigCache *txscript.SigCache) *BlkTmplGenerator {
return &BlkTmplGenerator{
policy: policy,
@ -367,7 +365,6 @@ func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params,
chain: chain,
timeSource: timeSource,
sigCache: sigCache,
hashCache: hashCache,
}
}
@ -636,7 +633,7 @@ mempoolLoop:
}
numSigOps += int64(numP2SHSigOps)
if blockSigOps+numSigOps < blockSigOps ||
blockSigOps+numSigOps > blockchain.MaxBlockSigOpsCost {
blockSigOps+numSigOps > blockchain.MaxSigOpsPerBlock {
log.Tracef("Skipping tx %s because it would "+
"exceed the maximum sigops per block", tx.Hash())
logSkippedDeps(tx, deps)
@ -698,8 +695,7 @@ mempoolLoop:
continue
}
err = blockchain.ValidateTransactionScripts(tx, blockUtxos,
txscript.StandardVerifyFlags, g.sigCache,
g.hashCache)
txscript.StandardVerifyFlags, g.sigCache)
if err != nil {
log.Tracef("Skipping tx %s due to error in "+
"ValidateTransactionScripts: %v", tx.Hash(), err)
@ -764,7 +760,7 @@ mempoolLoop:
}
// Create a new block ready to be solved.
merkles := blockchain.BuildMerkleTreeStore(blockTxns, false)
merkles := blockchain.BuildMerkleTreeStore(blockTxns)
var msgBlock wire.MsgBlock
msgBlock.Header = wire.BlockHeader{
Version: nextBlockVersion,
@ -850,7 +846,7 @@ func (g *BlkTmplGenerator) UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight
// Recalculate the merkle root with the updated extra nonce.
block := btcutil.NewBlock(msgBlock)
merkles := blockchain.BuildMerkleTreeStore(block.Transactions(), false)
merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1]
return nil
}

View File

@ -1603,7 +1603,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo
// Update the merkle root.
block := btcutil.NewBlock(template.Block)
merkles := blockchain.BuildMerkleTreeStore(block.Transactions(), false)
merkles := blockchain.BuildMerkleTreeStore(block.Transactions())
template.Block.Header.MerkleRoot = *merkles[len(merkles)-1]
}
@ -1711,7 +1711,7 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld
CurTime: header.Timestamp.Unix(),
Height: int64(template.Height),
PreviousHash: header.PrevBlock.String(),
SigOpLimit: blockchain.MaxBlockSigOpsCost,
SigOpLimit: blockchain.MaxSigOpsPerBlock,
SizeLimit: wire.MaxBlockPayload,
Transactions: transactions,
Version: header.Version,

View File

@ -208,7 +208,6 @@ type server struct {
addrManager *addrmgr.AddrManager
connManager *connmgr.ConnManager
sigCache *txscript.SigCache
hashCache *txscript.HashCache
rpcServer *rpcServer
syncManager *netsync.SyncManager
chain *blockchain.BlockChain
@ -2436,7 +2435,6 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param
timeSource: blockchain.NewMedianTime(),
services: services,
sigCache: txscript.NewSigCache(cfg.SigCacheMaxSize),
hashCache: txscript.NewHashCache(cfg.SigCacheMaxSize),
cfCheckptCaches: make(map[wire.FilterType][]cfHeaderKV),
}
@ -2494,7 +2492,6 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param
TimeSource: s.timeSource,
SigCache: s.sigCache,
IndexManager: indexManager,
HashCache: s.hashCache,
})
if err != nil {
return nil, err
@ -2537,7 +2534,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param
FreeTxRelayLimit: cfg.FreeTxRelayLimit,
MaxOrphanTxs: cfg.MaxOrphanTxs,
MaxOrphanTxSize: defaultMaxOrphanTxSize,
MaxSigOpCostPerTx: blockchain.MaxBlockSigOpsCost / 4,
MaxSigOpsPerTx: blockchain.MaxSigOpsPerBlock / 5,
MinRelayTxFee: cfg.minRelayTxFee,
MaxTxVersion: 2,
},
@ -2550,7 +2547,6 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param
},
IsDeploymentActive: s.chain.IsDeploymentActive,
SigCache: s.sigCache,
HashCache: s.hashCache,
AddrIndex: s.addrIndex,
FeeEstimator: s.feeEstimator,
}
@ -2581,8 +2577,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param
TxMinFreeFee: cfg.minRelayTxFee,
}
blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy,
s.chainParams, s.txMemPool, s.chain, s.timeSource,
s.sigCache, s.hashCache)
s.chainParams, s.txMemPool, s.chain, s.timeSource, s.sigCache)
s.cpuMiner = cpuminer.New(&cpuminer.Config{
ChainParams: chainParams,
BlockTemplateGenerator: blockTemplateGenerator,

View File

@ -23,8 +23,7 @@ const defaultTransactionAlloc = 2048
const MaxBlocksPerMsg = 500
// MaxBlockPayload is the maximum number of bytes a block message can be.
// After Segregated Witness, the max block payload has been raised to 4MB.
const MaxBlockPayload = 4000000
const MaxBlockPayload = 1000000
// maxTxPerBlock is the maximum number of transactions that could
// possibly fit into a block.

View File

@ -36,7 +36,7 @@ func TestBlock(t *testing.T) {
// Ensure max payload is expected value for latest protocol version.
// Num addresses (varInt) + max allowed addresses.
wantPayload := uint32(4000000)
wantPayload := uint32(1000000)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+

View File

@ -38,7 +38,7 @@ func TestMerkleBlock(t *testing.T) {
// Ensure max payload is expected value for latest protocol version.
// Num addresses (varInt) + max allowed addresses.
wantPayload := uint32(4000000)
wantPayload := uint32(1000000)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+

View File

@ -35,7 +35,7 @@ func TestTx(t *testing.T) {
}
// Ensure max payload is expected value for latest protocol version.
wantPayload := uint32(1000 * 4000)
wantPayload := uint32(1000 * 1000)
maxPayload := msg.MaxPayloadLength(pver)
if maxPayload != wantPayload {
t.Errorf("MaxPayloadLength: wrong max payload length for "+