mirror of
https://github.com/kaspanet/kaspad.git
synced 2025-03-30 15:08:33 +00:00
[NOD-1101] Hash data without serializing into a buffer first (#779)
* Add Hash Writers
* Add the hash writers to the tests
* Add the DoubleHash Writer to the benchmarks
* Remove buffers from hashing by using the Hash Writer
* Replace empty slice with nil in mempool test payload
This commit is contained in:
parent
2af03c1ccf
commit
0bf1052abf
@ -411,3 +411,21 @@ func BenchmarkDoubleHashH(b *testing.B) {
|
||||
_ = daghash.DoubleHashH(txBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkDoubleHashWriter performs a benchmark on how long it takes to perform
|
||||
// a double hash via the writer returning a daghash.Hash.
|
||||
func BenchmarkDoubleHashWriter(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
err := genesisCoinbaseTx.Serialize(&buf)
|
||||
if err != nil {
|
||||
b.Fatalf("Serialize: unexpected error: %+v", err)
|
||||
}
|
||||
txBytes := buf.Bytes()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
writer := daghash.NewDoubleHashWriter()
|
||||
_, _ = writer.Write(txBytes)
|
||||
writer.Finalize()
|
||||
}
|
||||
}
|
||||
|
@ -5,10 +5,11 @@
|
||||
package domainmessage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/mstime"
|
||||
"io"
|
||||
)
|
||||
|
||||
// BaseBlockHeaderPayload is the base number of bytes a block header can be,
|
||||
@ -65,13 +66,18 @@ func (h *BlockHeader) NumParentBlocks() byte {
|
||||
// BlockHash computes the block identifier hash for the given block header.
|
||||
func (h *BlockHeader) BlockHash() *daghash.Hash {
|
||||
// Encode the header and double sha256 everything prior to the number of
|
||||
// transactions. Ignore the error returns since there is no way the
|
||||
// encode could fail except being out of memory which would cause a
|
||||
// run-time panic.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, BaseBlockHeaderPayload+h.NumParentBlocks()))
|
||||
_ = writeBlockHeader(buf, 0, h)
|
||||
// transactions.
|
||||
writer := daghash.NewDoubleHashWriter()
|
||||
err := writeBlockHeader(writer, 0, h)
|
||||
if err != nil {
|
||||
// It seems like this could only happen if the writer returned an error.
|
||||
// and this writer should never return an error (no allocations or possible failures)
|
||||
// the only non-writer error path here is unknown types in `WriteElement`
|
||||
panic(fmt.Sprintf("BlockHash() failed. this should never fail unless BlockHeader was changed. err: %+v", err))
|
||||
}
|
||||
|
||||
return daghash.DoubleHashP(buf.Bytes())
|
||||
res := writer.Finalize()
|
||||
return &res
|
||||
}
|
||||
|
||||
// IsGenesis returns true iff this block is a genesis block
|
||||
|
@ -5,7 +5,6 @@
|
||||
package domainmessage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -302,29 +301,34 @@ func (msg *MsgTx) IsCoinBase() bool {
|
||||
// TxHash generates the Hash for the transaction.
|
||||
func (msg *MsgTx) TxHash() *daghash.Hash {
|
||||
// Encode the transaction and calculate double sha256 on the result.
|
||||
// Ignore the error returns since the only way the encode could fail
|
||||
// is being out of memory or due to nil pointers, both of which would
|
||||
// cause a run-time panic.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(txEncodingExcludePayload)))
|
||||
_ = msg.serialize(buf, txEncodingExcludePayload)
|
||||
writer := daghash.NewDoubleHashWriter()
|
||||
err := msg.serialize(writer, txEncodingExcludePayload)
|
||||
if err != nil {
|
||||
// this writer never return errors (no allocations or possible failures) so errors can only come from validity checks,
|
||||
// and we assume we never construct malformed transactions.
|
||||
panic(fmt.Sprintf("TxHash() failed. this should never fail for structurally-valid transactions. err: %+v", err))
|
||||
}
|
||||
|
||||
hash := daghash.Hash(daghash.DoubleHashH(buf.Bytes()))
|
||||
hash := writer.Finalize()
|
||||
return &hash
|
||||
}
|
||||
|
||||
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
|
||||
func (msg *MsgTx) TxID() *daghash.TxID {
|
||||
// Encode the transaction, replace signature script with zeroes, cut off
|
||||
// payload and calculate double sha256 on the result. Ignore the error
|
||||
// returns since the only way the encode could fail is being out of memory or
|
||||
// due to nil pointers, both of which would cause a run-time panic.
|
||||
// payload and calculate double sha256 on the result.
|
||||
var encodingFlags txEncoding
|
||||
if !msg.IsCoinBase() {
|
||||
encodingFlags = txEncodingExcludeSignatureScript | txEncodingExcludePayload
|
||||
}
|
||||
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(encodingFlags)))
|
||||
_ = msg.serialize(buf, encodingFlags)
|
||||
txID := daghash.TxID(daghash.DoubleHashH(buf.Bytes()))
|
||||
writer := daghash.NewDoubleHashWriter()
|
||||
err := msg.serialize(writer, encodingFlags)
|
||||
if err != nil {
|
||||
// this writer never return errors (no allocations or possible failures) so errors can only come from validity checks,
|
||||
// and we assume we never construct malformed transactions.
|
||||
panic(fmt.Sprintf("TxID() failed. this should never fail for structurally-valid transactions. err: %+v", err))
|
||||
}
|
||||
txID := daghash.TxID(writer.Finalize())
|
||||
return &txID
|
||||
}
|
||||
|
||||
|
@ -192,7 +192,8 @@ func (p *poolHarness) CreateSignedTxForSubnetwork(inputs []spendableOutpoint, nu
|
||||
})
|
||||
}
|
||||
|
||||
tx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts, subnetworkID, gas, []byte{})
|
||||
// Payload must be nil because it might be the native subnetwork
|
||||
tx := domainmessage.NewSubnetworkMsgTx(domainmessage.TxVersion, txIns, txOuts, subnetworkID, gas, nil)
|
||||
|
||||
// Sign the new transaction.
|
||||
for i := range tx.TxIn {
|
||||
|
@ -364,10 +364,10 @@ func calcSignatureHash(script []parsedOpcode, hashType SigHashType, tx *domainme
|
||||
// The final hash is the double sha256 of both the serialized modified
|
||||
// transaction and the hash type (encoded as a 4-byte little-endian
|
||||
// value) appended.
|
||||
wbuf := bytes.NewBuffer(make([]byte, 0, txCopy.SerializeSize()+4))
|
||||
txCopy.Serialize(wbuf)
|
||||
binary.Write(wbuf, binary.LittleEndian, hashType)
|
||||
hash := daghash.DoubleHashH(wbuf.Bytes())
|
||||
writer := daghash.NewDoubleHashWriter()
|
||||
txCopy.Serialize(writer)
|
||||
binary.Write(writer, binary.LittleEndian, hashType)
|
||||
hash := writer.Finalize()
|
||||
return &hash, nil
|
||||
}
|
||||
|
||||
|
@ -68,6 +68,35 @@ func TestHashFuncs(t *testing.T) {
|
||||
t.Errorf("HashH(%q) = %s, want %s", test.in, h, test.out)
|
||||
continue
|
||||
}
|
||||
writer := NewHashWriter()
|
||||
n, err := writer.Write([]byte(test.in))
|
||||
if err != nil || n != len(test.in) {
|
||||
t.Fatalf("This should never fail, n expected: '%d', found: '%d'. err: '%+v'.", n, len(test.in), err)
|
||||
}
|
||||
hash = writer.Finalize()
|
||||
h = fmt.Sprintf("%x", hash[:])
|
||||
if h != test.out {
|
||||
t.Fatalf("Finalize(%q) = %s, want %s", test.in, h, test.out)
|
||||
}
|
||||
|
||||
// Test that writing the input part by part still results in the same hash.
|
||||
writer = NewHashWriter()
|
||||
slice := []byte(test.in)[:len(test.in)/2]
|
||||
n, err = writer.Write(slice)
|
||||
if err != nil || n != len(slice) {
|
||||
t.Fatalf("This should never fail, n expected: '%d', found: '%d'. err: '%+v'.", n, len(slice), err)
|
||||
}
|
||||
slice = []byte(test.in)[len(test.in)/2:]
|
||||
n, err = writer.Write([]byte(test.in)[len(test.in)/2:])
|
||||
if err != nil || n != len(slice) {
|
||||
t.Fatalf("This should never fail, n expected: '%d', found: '%d'. err: '%+v'.", n, len(slice), err)
|
||||
}
|
||||
hash = writer.Finalize()
|
||||
h = fmt.Sprintf("%x", hash[:])
|
||||
if h != test.out {
|
||||
t.Errorf("Finalize(%q) = %s, want %s", test.in, h, test.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -132,5 +161,35 @@ func TestDoubleHashFuncs(t *testing.T) {
|
||||
test.out)
|
||||
continue
|
||||
}
|
||||
writer := NewDoubleHashWriter()
|
||||
n, err := writer.Write([]byte(test.in))
|
||||
if err != nil || n != len(test.in) {
|
||||
t.Fatalf("This should never fail, n expected: '%d', found: '%d'. err: '%+v'.", n, len(test.in), err)
|
||||
}
|
||||
hash = writer.Finalize()
|
||||
h = fmt.Sprintf("%x", hash[:])
|
||||
if h != test.out {
|
||||
t.Errorf("Finalize(%q) = %s, want %s", test.in, h, test.out)
|
||||
continue
|
||||
}
|
||||
|
||||
// Test that writing the input part by part still results in the same hash.
|
||||
writer = NewDoubleHashWriter()
|
||||
slice := []byte(test.in)[:len(test.in)/2]
|
||||
n, err = writer.Write(slice)
|
||||
if err != nil || n != len(slice) {
|
||||
t.Fatalf("This should never fail, n expected: '%d', found: '%d'. err: '%+v'.", n, len(slice), err)
|
||||
}
|
||||
slice = []byte(test.in)[len(test.in)/2:]
|
||||
n, err = writer.Write([]byte(test.in)[len(test.in)/2:])
|
||||
if err != nil || n != len(slice) {
|
||||
t.Fatalf("This should never fail, n expected: '%d', found: '%d'. err: '%+v'.", n, len(slice), err)
|
||||
}
|
||||
hash = writer.Finalize()
|
||||
h = fmt.Sprintf("%x", hash[:])
|
||||
if h != test.out {
|
||||
t.Errorf("Finalize(%q) = %s, want %s", test.in, h, test.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
58
util/daghash/hashwriters.go
Normal file
58
util/daghash/hashwriters.go
Normal file
@ -0,0 +1,58 @@
|
||||
package daghash
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// HashWriter incrementally hashes data written to it, so callers can hash
// a stream of chunks without first concatenating them into a single buffer.
// It exposes an io.Writer API plus a Finalize function to obtain the result:
// writing slice and calling Finalize is equivalent to HashH(slice).
type HashWriter struct {
	inner hash.Hash
}
|
||||
|
||||
// DoubleHashWriter incrementally double-hashes data written to it, so callers
// can hash a stream of chunks without first concatenating them into a single
// buffer. It exposes an io.Writer API plus a Finalize function to obtain the
// result: writing slice and calling Finalize is equivalent to DoubleHashH(slice).
type DoubleHashWriter struct {
	inner hash.Hash
}
|
||||
|
||||
// NewHashWriter returns a new Hash Writer
|
||||
func NewHashWriter() *HashWriter {
|
||||
return &HashWriter{sha256.New()}
|
||||
}
|
||||
|
||||
// Write will always return (len(p), nil)
|
||||
func (h *HashWriter) Write(p []byte) (n int, err error) {
|
||||
return h.inner.Write(p)
|
||||
}
|
||||
|
||||
// Finalize returns the resulting hash
|
||||
func (h *HashWriter) Finalize() Hash {
|
||||
res := Hash{}
|
||||
// Can never happen, Sha256's Sum is 32 bytes.
|
||||
err := res.SetBytes(h.inner.Sum(nil))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Should never fail, sha256.Sum is 32 bytes and so is daghash.Hash: '%+v'", err))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// NewDoubleHashWriter Returns a new DoubleHashWriter
|
||||
func NewDoubleHashWriter() *DoubleHashWriter {
|
||||
return &DoubleHashWriter{sha256.New()}
|
||||
}
|
||||
|
||||
// Write will always return (len(p), nil)
|
||||
func (h *DoubleHashWriter) Write(p []byte) (n int, err error) {
|
||||
return h.inner.Write(p)
|
||||
}
|
||||
|
||||
// Finalize returns the resulting double hash
|
||||
func (h *DoubleHashWriter) Finalize() Hash {
|
||||
firstHashInTheSum := h.inner.Sum(nil)
|
||||
return sha256.Sum256(firstHashInTheSum)
|
||||
}
|
Loading…
x
Reference in New Issue
Block a user