[DEV-361] Create type TxID as alias to daghash.Hash. (#175)
* [DEV-361] Create type TxID as alias to daghash.Hash. Use it for transaction IDs
* [DEV-361] Fixed missed renames
* [DEV-361] Removed usage of zeroHash
* [DEV-361] Fixed more missed renames

Parent: 9a2eee78a4
Commit: 100fbbaaa4
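Despite the word "alias" in the title, the diff below defines TxID as a distinct type (`type TxID Hash`), which is what lets the compiler catch places where a block hash is passed where a transaction ID is expected. The sketch below is not part of the commit: it is a minimal, self-contained mirror of the daghash types as they look after this change (Hash, TxID and ZeroTxID are the real names; the String method here is simplified and skips the byte reversal the real one documents), showing the conversion pattern the rest of the diff relies on.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// Local stand-ins for the daghash types after this commit. TxID shares the
// underlying representation of Hash but is a distinct defined type, so a Hash
// can no longer be used as a transaction ID without an explicit conversion.
const HashSize = 32

type Hash [HashSize]byte

type TxID Hash

// String is simplified here; the real daghash String byte-reverses the hash.
func (hash Hash) String() string { return hex.EncodeToString(hash[:]) }

// TxID delegates to Hash via an explicit conversion, as the commit does.
func (txID TxID) String() string { return Hash(txID).String() }

// ZeroTxID plays the role the removed package-level zeroHash variables used
// to play in outpoint checks.
var ZeroTxID TxID

func main() {
	var id TxID
	fmt.Println(id == ZeroTxID) // true: a coinbase-style "null" previous outpoint

	// Code that still needs a *Hash (e.g. BuildIDMerkleTreeStore) converts
	// explicitly, mirroring (*daghash.Hash)(tx.ID()) in the diff:
	h := (*Hash)(&id)
	fmt.Println(h.String())
}
```

The remainder of the diff is the mechanical fallout of that definition: map keys, signatures and test fixtures move from daghash.Hash to daghash.TxID, and the per-package zeroHash variables are replaced by daghash.ZeroHash and daghash.ZeroTxID.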
@@ -27,6 +27,15 @@ func newHashFromStr(hexStr string) *daghash.Hash {
 	return hash
 }
 
+// newTxIDFromStr converts the passed big-endian hex string into a
+// daghash.TxID. It only differs from the one available in daghash in that
+// it ignores the error since it will only (and must only) be called with
+// hard-coded, and therefore known good, IDs.
+func newTxIDFromStr(hexStr string) *daghash.TxID {
+	txID, _ := daghash.NewTxIDFromStr(hexStr)
+	return txID
+}
+
 // Checkpoints returns a slice of checkpoints (regardless of whether they are
 // already known). When there are no checkpoints for the chain, it will return
 // nil.
@@ -107,7 +107,7 @@ func loadUTXOSet(filename string) (UTXOSet, error) {
 	utxoSet := NewFullUTXOSet()
 	for {
 		// Tx ID of the utxo entry.
-		var txID daghash.Hash
+		var txID daghash.TxID
 		_, err := io.ReadAtLeast(r, txID[:], len(txID[:]))
 		if err != nil {
 			// Expected EOF at the right offset.
@@ -528,10 +528,10 @@ func deserializeOutPoint(serialized []byte) (*wire.OutPoint, error) {
 		return nil, errDeserialize("unexpected end of data")
 	}
 
-	hash := daghash.Hash{}
-	hash.SetBytes(serialized[:daghash.HashSize])
+	txID := daghash.TxID{}
+	txID.SetBytes(serialized[:daghash.HashSize])
 	index, _ := deserializeVLQ(serialized[daghash.HashSize:])
-	return wire.NewOutPoint(&hash, uint32(index)), nil
+	return wire.NewOutPoint(&txID, uint32(index)), nil
 }
 
 // deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
@ -233,7 +233,7 @@ func TestSpendJournalSerialization(t *testing.T) {
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: *newHashFromStr("0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"),
|
||||
TxID: *newTxIDFromStr("0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"),
|
||||
Index: 0,
|
||||
},
|
||||
SignatureScript: hexToBytes("47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901"),
|
||||
@ -268,7 +268,7 @@ func TestSpendJournalSerialization(t *testing.T) {
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: *newHashFromStr("c0ed017828e59ad5ed3cf70ee7c6fb0f426433047462477dc7a5d470f987a537"),
|
||||
TxID: *newTxIDFromStr("c0ed017828e59ad5ed3cf70ee7c6fb0f426433047462477dc7a5d470f987a537"),
|
||||
Index: 1,
|
||||
},
|
||||
SignatureScript: hexToBytes("493046022100c167eead9840da4a033c9a56470d7794a9bb1605b377ebe5688499b39f94be59022100fb6345cab4324f9ea0b9ee9169337534834638d818129778370f7d378ee4a325014104d962cac5390f12ddb7539507065d0def320d68c040f2e73337c3a1aaaab7195cb5c4d02e0959624d534f3c10c3cf3d73ca5065ebd62ae986b04c6d090d32627c"),
|
||||
@ -286,7 +286,7 @@ func TestSpendJournalSerialization(t *testing.T) {
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: *newHashFromStr("92fbe1d4be82f765dfabc9559d4620864b05cc897c4db0e29adac92d294e52b7"),
|
||||
TxID: *newTxIDFromStr("92fbe1d4be82f765dfabc9559d4620864b05cc897c4db0e29adac92d294e52b7"),
|
||||
Index: 0,
|
||||
},
|
||||
SignatureScript: hexToBytes("483045022100e256743154c097465cf13e89955e1c9ff2e55c46051b627751dee0144183157e02201d8d4f02cde8496aae66768f94d35ce54465bd4ae8836004992d3216a93a13f00141049d23ce8686fe9b802a7a938e8952174d35dd2c2089d4112001ed8089023ab4f93a3c9fcd5bfeaa9727858bf640dc1b1c05ec3b434bb59837f8640e8810e87742"),
|
||||
@ -353,7 +353,7 @@ func TestSpendJournalErrors(t *testing.T) {
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: *newHashFromStr("0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"),
|
||||
TxID: *newTxIDFromStr("0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"),
|
||||
Index: 0,
|
||||
},
|
||||
SignatureScript: hexToBytes("47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901"),
|
||||
@ -370,7 +370,7 @@ func TestSpendJournalErrors(t *testing.T) {
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: *newHashFromStr("0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"),
|
||||
TxID: *newTxIDFromStr("0437cd7f8525ceed2324359c2d0ba26006d92d856a9c20fa0241106ee5a597c9"),
|
||||
Index: 0,
|
||||
},
|
||||
SignatureScript: hexToBytes("47304402204e45e16932b8af514961a1d3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181522ec8eca07de4860a4acdd12909d831cc56cbbac4622082221a8768d1d0901"),
|
||||
|
@ -285,7 +285,7 @@ func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx {
|
||||
tx.AddTxIn(&wire.TxIn{
|
||||
// Coinbase transactions have no inputs, so previous outpoint is
|
||||
// zero hash and max index.
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{},
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
|
||||
wire.MaxPrevOutIndex),
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
SignatureScript: coinbaseScript,
|
||||
@ -1529,9 +1529,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
|
||||
// \-> b52(14)
|
||||
g.setTip("b43")
|
||||
g.nextBlock("b52", outs[14], func(b *wire.MsgBlock) {
|
||||
hash := newHashFromStr("00000000000000000000000000000000" +
|
||||
txID := newTxIDFromStr("00000000000000000000000000000000" +
|
||||
"00000000000000000123456789abcdef")
|
||||
b.Transactions[1].TxIn[0].PreviousOutPoint.TxID = *hash
|
||||
b.Transactions[1].TxIn[0].PreviousOutPoint.TxID = *txID
|
||||
b.Transactions[1].TxIn[0].PreviousOutPoint.Index = 0
|
||||
})
|
||||
rejected(blockdag.ErrMissingTxOut)
|
||||
|
@ -29,6 +29,18 @@ func newHashFromStr(hexStr string) *daghash.Hash {
|
||||
return hash
|
||||
}
|
||||
|
||||
// newTxIDFromStr converts the passed big-endian hex string into a
|
||||
// wire.TxID. It only differs from the one available in daghash in that
|
||||
// it panics on an error since it will only (and must only) be called with
|
||||
// hard-coded, and therefore known good, hashes.
|
||||
func newTxIDFromStr(hexStr string) *daghash.TxID {
|
||||
txID, err := daghash.NewTxIDFromStr(hexStr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return txID
|
||||
}
|
||||
|
||||
// fromHex converts the passed hex string into a byte slice and will panic if
|
||||
// there is an error. This is only provided for the hard-coded constants so
|
||||
// errors in the source code can be detected. It will only (and must only) be
|
||||
@ -65,7 +77,7 @@ var (
|
||||
Version: 1,
|
||||
TxIn: []*wire.TxIn{{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: fromHex("04ffff001d010445" +
|
||||
|
@ -569,8 +569,8 @@ type AddrIndex struct {
|
||||
// This allows fairly efficient updates when transactions are removed
|
||||
// once they are included into a block.
|
||||
unconfirmedLock sync.RWMutex
|
||||
txnsByAddr map[[addrKeySize]byte]map[daghash.Hash]*util.Tx
|
||||
addrsByTx map[daghash.Hash]map[[addrKeySize]byte]struct{}
|
||||
txnsByAddr map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
|
||||
addrsByTx map[daghash.TxID]map[[addrKeySize]byte]struct{}
|
||||
}
|
||||
|
||||
// Ensure the AddrIndex type implements the Indexer interface.
|
||||
@ -808,7 +808,7 @@ func (idx *AddrIndex) indexUnconfirmedAddresses(pkScript []byte, tx *util.Tx) {
|
||||
idx.unconfirmedLock.Lock()
|
||||
addrIndexEntry := idx.txnsByAddr[addrKey]
|
||||
if addrIndexEntry == nil {
|
||||
addrIndexEntry = make(map[daghash.Hash]*util.Tx)
|
||||
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
|
||||
idx.txnsByAddr[addrKey] = addrIndexEntry
|
||||
}
|
||||
addrIndexEntry[*tx.ID()] = tx
|
||||
@ -860,22 +860,22 @@ func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
|
||||
// (memory-only) address index.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (idx *AddrIndex) RemoveUnconfirmedTx(hash *daghash.Hash) {
|
||||
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
|
||||
idx.unconfirmedLock.Lock()
|
||||
defer idx.unconfirmedLock.Unlock()
|
||||
|
||||
// Remove all address references to the transaction from the address
|
||||
// index and remove the entry for the address altogether if it no longer
|
||||
// references any transactions.
|
||||
for addrKey := range idx.addrsByTx[*hash] {
|
||||
delete(idx.txnsByAddr[addrKey], *hash)
|
||||
for addrKey := range idx.addrsByTx[*txID] {
|
||||
delete(idx.txnsByAddr[addrKey], *txID)
|
||||
if len(idx.txnsByAddr[addrKey]) == 0 {
|
||||
delete(idx.txnsByAddr, addrKey)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the entry from the transaction to address lookup map as well.
|
||||
delete(idx.addrsByTx, *hash)
|
||||
delete(idx.addrsByTx, *txID)
|
||||
}
|
||||
|
||||
// UnconfirmedTxnsForAddress returns all transactions currently in the
|
||||
@ -917,8 +917,8 @@ func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
|
||||
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
|
||||
return &AddrIndex{
|
||||
dagParams: dagParams,
|
||||
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.Hash]*util.Tx),
|
||||
addrsByTx: make(map[daghash.Hash]map[[addrKeySize]byte]struct{}),
|
||||
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
|
||||
addrsByTx: make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -174,7 +174,7 @@ func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
|
||||
var prevHeader *daghash.Hash
|
||||
header := block.MsgBlock().Header
|
||||
if header.IsGenesis() {
|
||||
prevHeader = &daghash.Zero
|
||||
prevHeader = &daghash.ZeroHash
|
||||
} else {
|
||||
ph := header.SelectedParentHash()
|
||||
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
|
||||
|
@ -171,8 +171,8 @@ func putAcceptingBlocksEntry(target []byte, includingBlockID uint32) {
|
||||
byteOrder.PutUint32(target, includingBlockID)
|
||||
}
|
||||
|
||||
func dbPutIncludingBlocksEntry(dbTx database.Tx, txHash *daghash.Hash, blockID uint32, serializedData []byte) error {
|
||||
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txHash[:])
|
||||
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
|
||||
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -181,8 +181,8 @@ func dbPutIncludingBlocksEntry(dbTx database.Tx, txHash *daghash.Hash, blockID u
|
||||
return bucket.Put(blockIDBytes, serializedData)
|
||||
}
|
||||
|
||||
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txHash *daghash.Hash, blockID uint32, serializedData []byte) error {
|
||||
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txHash[:])
|
||||
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint32, serializedData []byte) error {
|
||||
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -198,14 +198,14 @@ func dbPutAcceptingBlocksEntry(dbTx database.Tx, txHash *daghash.Hash, blockID u
|
||||
//
|
||||
// P.S Because the transaction can be found in multiple blocks, this function arbitarily
|
||||
// returns the first block region that is stored in the txindex.
|
||||
func dbFetchFirstTxRegion(dbTx database.Tx, txHash *daghash.Hash) (*database.BlockRegion, error) {
|
||||
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
|
||||
// Load the record from the database and return now if it doesn't exist.
|
||||
txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txHash[:])
|
||||
txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
|
||||
if txBucket == nil {
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("No block region"+
|
||||
"was found for %s", txHash),
|
||||
"was found for %s", txID),
|
||||
}
|
||||
}
|
||||
cursor := txBucket.Cursor()
|
||||
@ -213,7 +213,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txHash *daghash.Hash) (*database.Blo
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("No block region"+
|
||||
"was found for %s", txHash),
|
||||
"was found for %s", txID),
|
||||
}
|
||||
}
|
||||
blockIDBytes := cursor.Key()
|
||||
@ -227,7 +227,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txHash *daghash.Hash) (*database.Blo
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("corrupt transaction index "+
|
||||
"entry for %s", txHash),
|
||||
"entry for %s", txID),
|
||||
}
|
||||
}
|
||||
|
||||
@ -237,7 +237,7 @@ func dbFetchFirstTxRegion(dbTx database.Tx, txHash *daghash.Hash) (*database.Blo
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("corrupt transaction index "+
|
||||
"entry for %s: %v", txHash, err),
|
||||
"entry for %s: %v", txID, err),
|
||||
}
|
||||
}
|
||||
|
||||
@ -449,11 +449,11 @@ func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ *blockda
|
||||
// will be returned for the both the entry and the error.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (idx *TxIndex) TxFirstBlockRegion(hash *daghash.Hash) (*database.BlockRegion, error) {
|
||||
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
|
||||
var region *database.BlockRegion
|
||||
err := idx.db.View(func(dbTx database.Tx) error {
|
||||
var err error
|
||||
region, err = dbFetchFirstTxRegion(dbTx, hash)
|
||||
region, err = dbFetchFirstTxRegion(dbTx, txID)
|
||||
return err
|
||||
})
|
||||
return region, err
|
||||
@ -499,23 +499,23 @@ func dbFetchTxBlocks(dbTx database.Tx, txHash *daghash.Hash) ([]daghash.Hash, er
|
||||
}
|
||||
|
||||
// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block point of view)
|
||||
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txHash *daghash.Hash) (*daghash.Hash, error) {
|
||||
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
|
||||
var acceptingBlock *daghash.Hash
|
||||
err := idx.db.View(func(dbTx database.Tx) error {
|
||||
var err error
|
||||
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txHash, dag)
|
||||
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
|
||||
return err
|
||||
})
|
||||
return acceptingBlock, err
|
||||
}
|
||||
|
||||
func dbFetchTxAcceptingBlock(dbTx database.Tx, txHash *daghash.Hash, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
|
||||
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txHash[:])
|
||||
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
|
||||
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
|
||||
if bucket == nil {
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("No accepting blocks "+
|
||||
"were found for %s", txHash),
|
||||
"were found for %s", txID),
|
||||
}
|
||||
}
|
||||
cursor := bucket.Cursor()
|
||||
@ -523,7 +523,7 @@ func dbFetchTxAcceptingBlock(dbTx database.Tx, txHash *daghash.Hash, dag *blockd
|
||||
return nil, database.Error{
|
||||
ErrorCode: database.ErrCorruption,
|
||||
Description: fmt.Sprintf("No accepting blocks "+
|
||||
"were found for %s", txHash),
|
||||
"were found for %s", txID),
|
||||
}
|
||||
}
|
||||
for ; cursor.Key() != nil; cursor.Next() {
|
||||
|
@ -136,7 +136,7 @@ var block1 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -194,7 +194,7 @@ var block2 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -221,7 +221,7 @@ var block2 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{
|
||||
TxID: daghash.TxID{
|
||||
0x71, 0x17, 0x72, 0x40, 0x46, 0x3e, 0x00, 0x87,
|
||||
0x00, 0x55, 0x61, 0xbf, 0x85, 0x88, 0x16, 0x2d,
|
||||
0xe9, 0x75, 0x89, 0x10, 0x8f, 0x27, 0x7c, 0xb6,
|
||||
@ -255,7 +255,7 @@ var block3Tx = &wire.MsgTx{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{
|
||||
TxID: daghash.TxID{
|
||||
0x54, 0x87, 0x57, 0x84, 0xed, 0x18, 0xc2, 0xde,
|
||||
0x6c, 0xdb, 0x54, 0xfa, 0xab, 0x4f, 0x1f, 0x52,
|
||||
0x73, 0x4b, 0xbb, 0x62, 0x79, 0x84, 0x95, 0xbe,
|
||||
@ -330,7 +330,7 @@ var block3 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -389,7 +389,7 @@ var block3A = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -448,7 +448,7 @@ var block4 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -512,7 +512,7 @@ var block5 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
|
@@ -61,7 +61,7 @@ func BuildHashMerkleTreeStore(transactions []*util.Tx) MerkleTree {
 func BuildIDMerkleTreeStore(transactions []*util.Tx) MerkleTree {
 	txIDs := make([]*daghash.Hash, len(transactions))
 	for i, tx := range transactions {
-		txIDs[i] = tx.ID()
+		txIDs[i] = (*daghash.Hash)(tx.ID())
 	}
 	return buildMerkleTreeStore(txIDs)
 }
|
@ -116,7 +116,7 @@ func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, su
|
||||
|
||||
for i := uint32(0); i < numInputs; i++ {
|
||||
tx.AddTxIn(&wire.TxIn{
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{}, i),
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{}, i),
|
||||
SignatureScript: []byte{},
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
})
|
||||
@ -156,7 +156,7 @@ func createCoinbaseTxForTest(blockHeight int32, numOutputs uint32, extraNonce in
|
||||
tx.AddTxIn(&wire.TxIn{
|
||||
// Coinbase transactions have no inputs, so previous outpoint is
|
||||
// zero hash and max index.
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{},
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
|
||||
wire.MaxPrevOutIndex),
|
||||
SignatureScript: coinbaseScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
|
@ -13,10 +13,10 @@ import (
|
||||
|
||||
// TestUTXOCollection makes sure that utxoCollection cloning and string representations work as expected.
|
||||
func TestUTXOCollection(t *testing.T) {
|
||||
hash0, _ := daghash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
hash1, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(hash0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(hash1, 0)
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(txID0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(txID1, 0)
|
||||
utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)
|
||||
utxoEntry1 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 20}, false, 1)
|
||||
|
||||
@ -72,10 +72,10 @@ func TestUTXOCollection(t *testing.T) {
|
||||
|
||||
// TestUTXODiff makes sure that utxoDiff creation, cloning, and string representations work as expected.
|
||||
func TestUTXODiff(t *testing.T) {
|
||||
hash0, _ := daghash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
hash1, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(hash0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(hash1, 0)
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(txID0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(txID1, 0)
|
||||
utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)
|
||||
utxoEntry1 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 20}, false, 1)
|
||||
diff := UTXODiff{
|
||||
@ -111,8 +111,8 @@ func TestUTXODiff(t *testing.T) {
|
||||
// TestUTXODiffRules makes sure that all diffFrom and WithDiff rules are followed.
|
||||
// Each test case represents a cell in the two tables outlined in the documentation for utxoDiff.
|
||||
func TestUTXODiffRules(t *testing.T) {
|
||||
hash0, _ := daghash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
outPoint0 := *wire.NewOutPoint(hash0, 0)
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
outPoint0 := *wire.NewOutPoint(txID0, 0)
|
||||
utxoEntry0 := NewUTXOEntry(&wire.TxOut{PkScript: []byte{}, Value: 10}, true, 0)
|
||||
|
||||
// For each of the following test cases, we will:
|
||||
@ -327,10 +327,10 @@ func TestUTXODiffRules(t *testing.T) {
|
||||
|
||||
// TestFullUTXOSet makes sure that fullUTXOSet is working as expected.
|
||||
func TestFullUTXOSet(t *testing.T) {
|
||||
hash0, _ := daghash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
hash1, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(hash0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(hash1, 0)
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(txID0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(txID1, 0)
|
||||
txOut0 := &wire.TxOut{PkScript: []byte{}, Value: 10}
|
||||
txOut1 := &wire.TxOut{PkScript: []byte{}, Value: 20}
|
||||
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
|
||||
@ -360,7 +360,7 @@ func TestFullUTXOSet(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test fullUTXOSet addTx
|
||||
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: *hash0, Index: 0}, Sequence: 0}
|
||||
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: *txID0, Index: 0}, Sequence: 0}
|
||||
transaction0 := wire.NewMsgTx(1)
|
||||
transaction0.TxIn = []*wire.TxIn{txIn0}
|
||||
transaction0.TxOut = []*wire.TxOut{txOut0}
|
||||
@ -389,10 +389,10 @@ func TestFullUTXOSet(t *testing.T) {
|
||||
|
||||
// TestDiffUTXOSet makes sure that diffUTXOSet is working as expected.
|
||||
func TestDiffUTXOSet(t *testing.T) {
|
||||
hash0, _ := daghash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
hash1, _ := daghash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(hash0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(hash1, 0)
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
txID1, _ := daghash.NewTxIDFromStr("1111111111111111111111111111111111111111111111111111111111111111")
|
||||
outPoint0 := *wire.NewOutPoint(txID0, 0)
|
||||
outPoint1 := *wire.NewOutPoint(txID1, 0)
|
||||
txOut0 := &wire.TxOut{PkScript: []byte{}, Value: 10}
|
||||
txOut1 := &wire.TxOut{PkScript: []byte{}, Value: 20}
|
||||
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
|
||||
@ -634,8 +634,8 @@ func TestUTXOSetDiffRules(t *testing.T) {
|
||||
// TestDiffUTXOSet_addTx makes sure that diffUTXOSet addTx works as expected
|
||||
func TestDiffUTXOSet_addTx(t *testing.T) {
|
||||
// transaction0 is coinbase. As such, it has exactly one input with hash zero and MaxUInt32 index
|
||||
hash0, _ := daghash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: *hash0, Index: math.MaxUint32}, Sequence: 0}
|
||||
txID0, _ := daghash.NewTxIDFromStr("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
txIn0 := &wire.TxIn{SignatureScript: []byte{}, PreviousOutPoint: wire.OutPoint{TxID: *txID0, Index: math.MaxUint32}, Sequence: 0}
|
||||
txOut0 := &wire.TxOut{PkScript: []byte{0}, Value: 10}
|
||||
utxoEntry0 := NewUTXOEntry(txOut0, true, 0)
|
||||
transaction0 := wire.NewMsgTx(1)
|
||||
@ -918,7 +918,7 @@ func TestDiffFromTx(t *testing.T) {
|
||||
//Test that we get an error if we don't have the outpoint inside the utxo set
|
||||
invalidTx := wire.NewMsgTx(wire.TxVersion)
|
||||
invalidTx.AddTxIn(&wire.TxIn{
|
||||
PreviousOutPoint: wire.OutPoint{TxID: daghash.Hash{}, Index: 0},
|
||||
PreviousOutPoint: wire.OutPoint{TxID: daghash.TxID{}, Index: 0},
|
||||
SignatureScript: nil,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
})
|
||||
|
@@ -49,17 +49,10 @@ const (
 	MaxOutputsPerBlock = wire.MaxBlockPayload / wire.MinTxOutPayload
 )
 
-var (
-	// zeroHash is the zero value for a daghash.Hash and is defined as
-	// a package level variable to avoid the need to create a new instance
-	// every time a check is needed.
-	zeroHash daghash.Hash
-)
-
 // isNullOutpoint determines whether or not a previous transaction output point
 // is set.
 func isNullOutpoint(outpoint *wire.OutPoint) bool {
-	if outpoint.Index == math.MaxUint32 && outpoint.TxID == zeroHash {
+	if outpoint.Index == math.MaxUint32 && outpoint.TxID == daghash.ZeroTxID {
 		return true
 	}
 	return false
@@ -545,7 +538,7 @@ func (dag *BlockDAG) checkBlockSanity(block *util.Block, flags BehaviorFlags) er
 	// Check for duplicate transactions. This check will be fairly quick
 	// since the transaction IDs are already cached due to building the
 	// merkle tree above.
-	existingTxIDs := make(map[daghash.Hash]struct{})
+	existingTxIDs := make(map[daghash.TxID]struct{})
 	for _, tx := range transactions {
 		id := tx.ID()
 		if _, exists := existingTxIDs[*id]; exists {
|
@ -234,7 +234,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -261,7 +261,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
|
||||
@ -331,7 +331,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
|
||||
@ -400,7 +400,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90,
|
||||
@ -471,7 +471,7 @@ func TestCheckBlockSanity(t *testing.T) {
|
||||
// and handled properly.
|
||||
func TestCheckSerializedHeight(t *testing.T) {
|
||||
// Create an empty coinbase template to be used in the tests below.
|
||||
coinbaseOutpoint := wire.NewOutPoint(&daghash.Hash{}, math.MaxUint32)
|
||||
coinbaseOutpoint := wire.NewOutPoint(&daghash.TxID{}, math.MaxUint32)
|
||||
coinbaseTx := wire.NewMsgTx(1)
|
||||
coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil))
|
||||
|
||||
@ -676,7 +676,7 @@ var Block100000 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -703,7 +703,7 @@ var Block100000 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
|
||||
@ -773,7 +773,7 @@ var Block100000 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
|
||||
@ -842,7 +842,7 @@ var Block100000 = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90,
|
||||
@ -931,7 +931,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
@ -965,7 +965,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
|
||||
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
|
||||
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
|
||||
@ -1035,7 +1035,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
|
||||
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
|
||||
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
|
||||
@ -1104,7 +1104,7 @@ var BlockWithWrongTxOrder = wire.MsgBlock{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash([32]byte{ // Make go vet happy.
|
||||
TxID: daghash.TxID([32]byte{ // Make go vet happy.
|
||||
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
|
||||
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
|
||||
0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90,
|
||||
|
@@ -100,7 +100,7 @@ func NewNotifyReceivedCmd(addresses []string) *NotifyReceivedCmd {
 // OutPoint describes a transaction outpoint that will be marshalled to and
 // from JSON.
 type OutPoint struct {
-	Hash  string `json:"hash"`
+	TxID  string `json:"txid"`
 	Index uint32 `json:"index"`
 }
 
|
@ -143,49 +143,49 @@ func TestDAGSvrWsCmds(t *testing.T) {
|
||||
{
|
||||
name: "notifySpent",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("notifySpent", `[{"hash":"123","index":0}]`)
|
||||
return btcjson.NewCmd("notifySpent", `[{"txid":"123","index":0}]`)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
ops := []btcjson.OutPoint{{Hash: "123", Index: 0}}
|
||||
ops := []btcjson.OutPoint{{TxID: "123", Index: 0}}
|
||||
return btcjson.NewNotifySpentCmd(ops)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"notifySpent","params":[[{"hash":"123","index":0}]],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"notifySpent","params":[[{"txid":"123","index":0}]],"id":1}`,
|
||||
unmarshalled: &btcjson.NotifySpentCmd{
|
||||
OutPoints: []btcjson.OutPoint{{Hash: "123", Index: 0}},
|
||||
OutPoints: []btcjson.OutPoint{{TxID: "123", Index: 0}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "stopNotifySpent",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("stopNotifySpent", `[{"hash":"123","index":0}]`)
|
||||
return btcjson.NewCmd("stopNotifySpent", `[{"txid":"123","index":0}]`)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
ops := []btcjson.OutPoint{{Hash: "123", Index: 0}}
|
||||
ops := []btcjson.OutPoint{{TxID: "123", Index: 0}}
|
||||
return btcjson.NewStopNotifySpentCmd(ops)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"stopNotifySpent","params":[[{"hash":"123","index":0}]],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"stopNotifySpent","params":[[{"txid":"123","index":0}]],"id":1}`,
|
||||
unmarshalled: &btcjson.StopNotifySpentCmd{
|
||||
OutPoints: []btcjson.OutPoint{{Hash: "123", Index: 0}},
|
||||
OutPoints: []btcjson.OutPoint{{TxID: "123", Index: 0}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "loadTxFilter",
|
||||
newCmd: func() (interface{}, error) {
|
||||
return btcjson.NewCmd("loadTxFilter", false, `["1Address"]`, `[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]`)
|
||||
return btcjson.NewCmd("loadTxFilter", false, `["1Address"]`, `[{"txid":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]`)
|
||||
},
|
||||
staticCmd: func() interface{} {
|
||||
addrs := []string{"1Address"}
|
||||
ops := []btcjson.OutPoint{{
|
||||
Hash: "0000000000000000000000000000000000000000000000000000000000000123",
|
||||
TxID: "0000000000000000000000000000000000000000000000000000000000000123",
|
||||
Index: 0,
|
||||
}}
|
||||
return btcjson.NewLoadTxFilterCmd(false, addrs, ops)
|
||||
},
|
||||
marshalled: `{"jsonrpc":"1.0","method":"loadTxFilter","params":[false,["1Address"],[{"hash":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]],"id":1}`,
|
||||
marshalled: `{"jsonrpc":"1.0","method":"loadTxFilter","params":[false,["1Address"],[{"txid":"0000000000000000000000000000000000000000000000000000000000000123","index":0}]],"id":1}`,
|
||||
unmarshalled: &btcjson.LoadTxFilterCmd{
|
||||
Reload: false,
|
||||
Addresses: []string{"1Address"},
|
||||
OutPoints: []btcjson.OutPoint{{Hash: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}},
|
||||
OutPoints: []btcjson.OutPoint{{TxID: "0000000000000000000000000000000000000000000000000000000000000123", Index: 0}},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@@ -27,6 +27,9 @@ var ErrHashStrSize = fmt.Errorf("max hash string length is %v bytes", MaxHashStr
 // typically represents the double sha256 of data.
 type Hash [HashSize]byte
 
+// TxID is transaction hash not including payload and signature.
+type TxID Hash
+
 // String returns the Hash as the hexadecimal string of the byte-reversed
 // hash.
 func (hash Hash) String() string {
@@ -36,6 +39,12 @@ func (hash Hash) String() string {
 	return hex.EncodeToString(hash[:])
 }
 
+// String returns the TxId as the hexadecimal string of the byte-reversed
+// hash.
+func (txID TxID) String() string {
+	return Hash(txID).String()
+}
+
 // Strings returns a slice of strings representing the hashes in the given slice of hashes
 func Strings(hashes []Hash) []string {
 	strings := make([]string, len(hashes))
@@ -58,6 +67,15 @@ func (hash *Hash) CloneBytes() []byte {
 	return newHash
 }
 
+// CloneBytes returns a copy of the bytes which represent the TxID as a byte
+// slice.
+//
+// NOTE: It is generally cheaper to just slice the hash directly thereby reusing
+// the same bytes rather than calling this method.
+func (txID *TxID) CloneBytes() []byte {
+	return (*Hash)(txID).CloneBytes()
+}
+
 // SetBytes sets the bytes which represent the hash. An error is returned if
 // the number of bytes passed in is not HashSize.
 func (hash *Hash) SetBytes(newHash []byte) error {
@ -71,6 +89,12 @@ func (hash *Hash) SetBytes(newHash []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBytes sets the bytes which represent the TxID. An error is returned if
|
||||
// the number of bytes passed in is not HashSize.
|
||||
func (txID *TxID) SetBytes(newID []byte) error {
|
||||
return (*Hash)(txID).SetBytes(newID)
|
||||
}
|
||||
|
||||
// IsEqual returns true if target is the same as hash.
|
||||
func (hash *Hash) IsEqual(target *Hash) bool {
|
||||
if hash == nil && target == nil {
|
||||
@ -82,6 +106,11 @@ func (hash *Hash) IsEqual(target *Hash) bool {
|
||||
return *hash == *target
|
||||
}
|
||||
|
||||
// IsEqual returns true if target is the same as TxID.
|
||||
func (txID *TxID) IsEqual(target *TxID) bool {
|
||||
return (*Hash)(txID).IsEqual((*Hash)(target))
|
||||
}
|
||||
|
||||
// AreEqual returns true if both slices contain the same hashes.
|
||||
// Either slice must not contain duplicates.
|
||||
func AreEqual(first []Hash, second []Hash) bool {
|
||||
@ -109,6 +138,13 @@ func NewHash(newHash []byte) (*Hash, error) {
|
||||
return &sh, err
|
||||
}
|
||||
|
||||
// NewTxID returns a new TxID from a byte slice. An error is returned if
|
||||
// the number of bytes passed in is not HashSize.
|
||||
func NewTxID(newTxID []byte) (*TxID, error) {
|
||||
hash, err := NewHash(newTxID)
|
||||
return (*TxID)(hash), err
|
||||
}
|
||||
|
||||
// NewHashFromStr creates a Hash from a hash string. The string should be
|
||||
// the hexadecimal string of a byte-reversed hash, but any missing characters
|
||||
// result in zero padding at the end of the Hash.
|
||||
@ -121,6 +157,14 @@ func NewHashFromStr(hash string) (*Hash, error) {
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// NewTxIDFromStr creates a TxID from a hash string. The string should be
|
||||
// the hexadecimal string of a byte-reversed hash, but any missing characters
|
||||
// result in zero padding at the end of the Hash.
|
||||
func NewTxIDFromStr(idStr string) (*TxID, error) {
|
||||
hash, err := NewHashFromStr(idStr)
|
||||
return (*TxID)(hash), err
|
||||
}
|
||||
|
||||
// Decode decodes the byte-reversed hexadecimal string encoding of a Hash to a
|
||||
// destination.
|
||||
func Decode(dst *Hash, src string) error {
|
||||
@@ -197,6 +241,10 @@ func Sort(hashes []Hash) {
 	})
 }
 
-// Zero is the Hash value of all zero bytes, defined here for
+// ZeroHash is the Hash value of all zero bytes, defined here for
 // convenience.
-var Zero Hash
+var ZeroHash Hash
+
+// ZeroTxID is the Hash value of all zero bytes, defined here for
+// convenience.
+var ZeroTxID TxID
|
@ -19,7 +19,7 @@ var genesisCoinbaseTx = wire.MsgTx{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
TxID: daghash.Hash{},
|
||||
TxID: daghash.TxID{},
|
||||
Index: 0xffffffff,
|
||||
},
|
||||
SignatureScript: []byte{
|
||||
|
@@ -12,7 +12,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/daglabs/btcd/dagconfig/daghash"
 	"github.com/daglabs/btcd/database"
 	"github.com/daglabs/btcd/util"
 	"github.com/daglabs/btcd/wire"
@@ -30,10 +29,6 @@ var (
 		InFile:   "bootstrap.dat",
 		Progress: 10,
 	}
-
-	// zeroHash is a simply a hash with all zeros. It is defined here to
-	// avoid creating it multiple times.
-	zeroHash = daghash.Hash{}
 )
 
 // importResults houses the stats and result as an import operation.
|
@ -109,7 +109,7 @@ func createCoinbaseTx(coinbaseScript []byte, nextBlockHeight int32,
|
||||
tx.AddTxIn(&wire.TxIn{
|
||||
// Coinbase transactions have no inputs, so previous outpoint is
|
||||
// zero hash and max index.
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{},
|
||||
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
|
||||
wire.MaxPrevOutIndex),
|
||||
SignatureScript: coinbaseScript,
|
||||
Sequence: wire.MaxTxInSequenceNum,
|
||||
|
@ -246,7 +246,7 @@ func (m *memWallet) chainSyncer() {
|
||||
|
||||
// evalOutputs evaluates each of the passed outputs, creating a new matching
|
||||
// utxo within the wallet if we're able to spend the output.
|
||||
func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txHash *daghash.Hash,
|
||||
func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txID *daghash.TxID,
|
||||
isCoinbase bool, undo *undoEntry) {
|
||||
|
||||
for i, output := range outputs {
|
||||
@ -268,7 +268,7 @@ func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txHash *daghash.Hash,
|
||||
maturityHeight = m.currentHeight + int32(m.net.CoinbaseMaturity)
|
||||
}
|
||||
|
||||
op := wire.OutPoint{TxID: *txHash, Index: uint32(i)}
|
||||
op := wire.OutPoint{TxID: *txID, Index: uint32(i)}
|
||||
m.utxos[op] = &utxo{
|
||||
value: util.Amount(output.Value),
|
||||
keyIndex: keyIndex,
|
||||
|
@ -93,8 +93,8 @@ func NewSatoshiPerByte(fee util.Amount, size uint32) SatoshiPerByte {
|
||||
// observedTransaction represents an observed transaction and some
|
||||
// additional data required for the fee estimation algorithm.
|
||||
type observedTransaction struct {
|
||||
// A transaction hash.
|
||||
hash daghash.Hash
|
||||
// A transaction ID.
|
||||
id daghash.TxID
|
||||
|
||||
// The fee per byte of the transaction in satoshis.
|
||||
feeRate SatoshiPerByte
|
||||
@ -108,7 +108,7 @@ type observedTransaction struct {
|
||||
}
|
||||
|
||||
func (o *observedTransaction) Serialize(w io.Writer) {
|
||||
binary.Write(w, binary.BigEndian, o.hash)
|
||||
binary.Write(w, binary.BigEndian, o.id)
|
||||
binary.Write(w, binary.BigEndian, o.feeRate)
|
||||
binary.Write(w, binary.BigEndian, o.observed)
|
||||
binary.Write(w, binary.BigEndian, o.mined)
|
||||
@ -118,7 +118,7 @@ func deserializeObservedTransaction(r io.Reader) (*observedTransaction, error) {
|
||||
ot := observedTransaction{}
|
||||
|
||||
// The first 32 bytes should be a hash.
|
||||
binary.Read(r, binary.BigEndian, &ot.hash)
|
||||
binary.Read(r, binary.BigEndian, &ot.id)
|
||||
|
||||
// The next 8 are SatoshiPerByte
|
||||
binary.Read(r, binary.BigEndian, &ot.feeRate)
|
||||
@ -169,7 +169,7 @@ type FeeEstimator struct {
|
||||
numBlocksRegistered uint32
|
||||
|
||||
mtx sync.RWMutex
|
||||
observed map[daghash.Hash]*observedTransaction
|
||||
observed map[daghash.TxID]*observedTransaction
|
||||
bin [estimateFeeDepth][]*observedTransaction
|
||||
|
||||
// The cached estimates.
|
||||
@ -190,7 +190,7 @@ func NewFeeEstimator(maxRollback, minRegisteredBlocks uint32) *FeeEstimator {
|
||||
lastKnownHeight: mining.UnminedHeight,
|
||||
binSize: estimateFeeBinSize,
|
||||
maxReplacements: estimateFeeMaxReplacements,
|
||||
observed: make(map[daghash.Hash]*observedTransaction),
|
||||
observed: make(map[daghash.TxID]*observedTransaction),
|
||||
dropped: make([]*registeredBlock, 0, maxRollback),
|
||||
}
|
||||
}
|
||||
@ -206,12 +206,12 @@ func (ef *FeeEstimator) ObserveTransaction(t *TxDesc) {
|
||||
return
|
||||
}
|
||||
|
||||
hash := *t.Tx.ID()
|
||||
if _, ok := ef.observed[hash]; !ok {
|
||||
txID := *t.Tx.ID()
|
||||
if _, ok := ef.observed[txID]; !ok {
|
||||
size := uint32(t.Tx.MsgTx().SerializeSize())
|
||||
|
||||
ef.observed[hash] = &observedTransaction{
|
||||
hash: hash,
|
||||
ef.observed[txID] = &observedTransaction{
|
||||
id: txID,
|
||||
feeRate: NewSatoshiPerByte(util.Amount(t.Fee), size),
|
||||
observed: t.Height,
|
||||
mined: mining.UnminedHeight,
|
||||
@ -255,10 +255,10 @@ func (ef *FeeEstimator) RegisterBlock(block *util.Block) error {
|
||||
|
||||
// Go through the txs in the block.
|
||||
for t := range transactions {
|
||||
hash := *t.Hash()
|
||||
txID := *t.ID()
|
||||
|
||||
// Have we observed this tx in the mempool?
|
||||
o, ok := ef.observed[hash]
|
||||
o, ok := ef.observed[txID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
@ -269,7 +269,7 @@ func (ef *FeeEstimator) RegisterBlock(block *util.Block) error {
|
||||
// This shouldn't happen if the fee estimator works correctly,
|
||||
// but return an error if it does.
|
||||
if o.mined != mining.UnminedHeight {
|
||||
log.Error("Estimate fee: transaction ", hash.String(), " has already been mined")
|
||||
log.Error("Estimate fee: transaction ", txID.String(), " has already been mined")
|
||||
return errors.New("Transaction has already been mined")
|
||||
}
|
||||
|
||||
@ -610,7 +610,7 @@ type observedTxSet []*observedTransaction
|
||||
func (q observedTxSet) Len() int { return len(q) }
|
||||
|
||||
func (q observedTxSet) Less(i, j int) bool {
|
||||
return strings.Compare(q[i].hash.String(), q[j].hash.String()) < 0
|
||||
return strings.Compare(q[i].id.String(), q[j].id.String()) < 0
|
||||
}
|
||||
|
||||
func (q observedTxSet) Swap(i, j int) {
|
||||
@ -691,7 +691,7 @@ func RestoreFeeEstimator(data FeeEstimatorState) (*FeeEstimator, error) {
|
||||
}
|
||||
|
||||
ef := &FeeEstimator{
|
||||
observed: make(map[daghash.Hash]*observedTransaction),
|
||||
observed: make(map[daghash.TxID]*observedTransaction),
|
||||
}
|
||||
|
||||
// Read basic parameters.
|
||||
@ -712,7 +712,7 @@ func RestoreFeeEstimator(data FeeEstimatorState) (*FeeEstimator, error) {
|
||||
return nil, err
|
||||
}
|
||||
observed[i] = ot
|
||||
ef.observed[ot.hash] = ot
|
||||
ef.observed[ot.id] = ot
|
||||
}
|
||||
|
||||
// Read bins.
|
||||
|
@ -24,7 +24,7 @@ func newTestFeeEstimator(binSize, maxReplacements, maxRollback uint32) *FeeEstim
|
||||
binSize: int32(binSize),
|
||||
minRegisteredBlocks: 0,
|
||||
maxReplacements: int32(maxReplacements),
|
||||
observed: make(map[daghash.Hash]*observedTransaction),
|
||||
observed: make(map[daghash.TxID]*observedTransaction),
|
||||
dropped: make([]*registeredBlock, 0, maxRollback),
|
||||
}
|
||||
}
|
||||
|
@ -172,11 +172,11 @@ type TxPool struct {
|
||||
|
||||
mtx sync.RWMutex
|
||||
cfg Config
|
||||
pool map[daghash.Hash]*TxDesc
|
||||
depends map[daghash.Hash]*TxDesc
|
||||
dependsByPrev map[wire.OutPoint]map[daghash.Hash]*TxDesc
|
||||
orphans map[daghash.Hash]*orphanTx
|
||||
orphansByPrev map[wire.OutPoint]map[daghash.Hash]*util.Tx
|
||||
pool map[daghash.TxID]*TxDesc
|
||||
depends map[daghash.TxID]*TxDesc
|
||||
dependsByPrev map[wire.OutPoint]map[daghash.TxID]*TxDesc
|
||||
orphans map[daghash.TxID]*orphanTx
|
||||
orphansByPrev map[wire.OutPoint]map[daghash.TxID]*util.Tx
|
||||
outpoints map[wire.OutPoint]*util.Tx
|
||||
pennyTotal float64 // exponentially decaying total for penny spends.
|
||||
lastPennyUnix int64 // unix time of last ``penny spend''
|
||||
@ -336,7 +336,7 @@ func (mp *TxPool) addOrphan(tx *util.Tx, tag Tag) {
|
||||
for _, txIn := range tx.MsgTx().TxIn {
|
||||
if _, exists := mp.orphansByPrev[txIn.PreviousOutPoint]; !exists {
|
||||
mp.orphansByPrev[txIn.PreviousOutPoint] =
|
||||
make(map[daghash.Hash]*util.Tx)
|
||||
make(map[daghash.TxID]*util.Tx)
|
||||
}
|
||||
mp.orphansByPrev[txIn.PreviousOutPoint][*tx.ID()] = tx
|
||||
}
|
||||
@ -393,7 +393,7 @@ func (mp *TxPool) removeOrphanDoubleSpends(tx *util.Tx) {
|
||||
// exists in the main pool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) isTransactionInPool(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) isTransactionInPool(hash *daghash.TxID) bool {
|
||||
if _, exists := mp.pool[*hash]; exists {
|
||||
return true
|
||||
}
|
||||
@ -404,7 +404,7 @@ func (mp *TxPool) isTransactionInPool(hash *daghash.Hash) bool {
|
||||
// exists in the main pool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) IsTransactionInPool(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) IsTransactionInPool(hash *daghash.TxID) bool {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.RLock()
|
||||
inPool := mp.isTransactionInPool(hash)
|
||||
@ -417,7 +417,7 @@ func (mp *TxPool) IsTransactionInPool(hash *daghash.Hash) bool {
|
||||
// exists in the depend pool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) isInDependPool(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) isInDependPool(hash *daghash.TxID) bool {
|
||||
if _, exists := mp.depends[*hash]; exists {
|
||||
return true
|
||||
}
|
||||
@ -429,7 +429,7 @@ func (mp *TxPool) isInDependPool(hash *daghash.Hash) bool {
|
||||
// exists in the main pool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) IsInDependPool(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) IsInDependPool(hash *daghash.TxID) bool {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.RLock()
|
||||
defer mp.mtx.RUnlock()
|
||||
@ -440,7 +440,7 @@ func (mp *TxPool) IsInDependPool(hash *daghash.Hash) bool {
|
||||
// in the orphan pool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) isOrphanInPool(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) isOrphanInPool(hash *daghash.TxID) bool {
|
||||
if _, exists := mp.orphans[*hash]; exists {
|
||||
return true
|
||||
}
|
||||
@ -452,7 +452,7 @@ func (mp *TxPool) isOrphanInPool(hash *daghash.Hash) bool {
|
||||
// in the orphan pool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) IsOrphanInPool(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) IsOrphanInPool(hash *daghash.TxID) bool {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.RLock()
|
||||
inPool := mp.isOrphanInPool(hash)
|
||||
@ -465,7 +465,7 @@ func (mp *TxPool) IsOrphanInPool(hash *daghash.Hash) bool {
|
||||
// in the main pool or in the orphan pool.
|
||||
//
|
||||
// This function MUST be called with the mempool lock held (for reads).
|
||||
func (mp *TxPool) haveTransaction(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) haveTransaction(hash *daghash.TxID) bool {
|
||||
return mp.isTransactionInPool(hash) || mp.isOrphanInPool(hash)
|
||||
}
|
||||
|
||||
@ -473,7 +473,7 @@ func (mp *TxPool) haveTransaction(hash *daghash.Hash) bool {
|
||||
// in the main pool or in the orphan pool.
|
||||
//
|
||||
// This function is safe for concurrent access.
|
||||
func (mp *TxPool) HaveTransaction(hash *daghash.Hash) bool {
|
||||
func (mp *TxPool) HaveTransaction(hash *daghash.TxID) bool {
|
||||
// Protect concurrent access.
|
||||
mp.mtx.RLock()
|
||||
haveTx := mp.haveTransaction(hash)
|
||||
@ -628,7 +628,7 @@ func (mp *TxPool) addTransaction(tx *util.Tx, height int32, fee uint64, parentsI
|
||||
mp.depends[*tx.ID()] = txD
|
||||
for _, previousOutPoint := range parentsInPool {
|
||||
if _, exists := mp.dependsByPrev[*previousOutPoint]; !exists {
|
||||
mp.dependsByPrev[*previousOutPoint] = make(map[daghash.Hash]*TxDesc)
|
||||
mp.dependsByPrev[*previousOutPoint] = make(map[daghash.TxID]*TxDesc)
|
||||
}
|
||||
mp.dependsByPrev[*previousOutPoint][*tx.ID()] = txD
|
}

@ -685,7 +685,7 @@ func (mp *TxPool) CheckSpend(op wire.OutPoint) *util.Tx {
}

// This function MUST be called with the mempool lock held (for reads).
func (mp *TxPool) fetchTransaction(txID *daghash.Hash) (*TxDesc, bool) {
func (mp *TxPool) fetchTransaction(txID *daghash.TxID) (*TxDesc, bool) {
txDesc, exists := mp.pool[*txID]
if !exists {
txDesc, exists = mp.depends[*txID]
@ -698,7 +698,7 @@ func (mp *TxPool) fetchTransaction(txID *daghash.Hash) (*TxDesc, bool) {
// orphans.
//
// This function is safe for concurrent access.
func (mp *TxPool) FetchTransaction(txID *daghash.Hash) (*util.Tx, error) {
func (mp *TxPool) FetchTransaction(txID *daghash.TxID) (*util.Tx, error) {
// Protect concurrent access.
mp.mtx.RLock()
defer mp.mtx.RUnlock()
@ -715,7 +715,7 @@ func (mp *TxPool) FetchTransaction(txID *daghash.Hash) (*util.Tx, error) {
// more details.
//
// This function MUST be called with the mempool lock held (for writes).
func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, isNew, rateLimit, rejectDupOrphans bool) ([]*daghash.Hash, *TxDesc, error) {
func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, isNew, rateLimit, rejectDupOrphans bool) ([]*daghash.TxID, *TxDesc, error) {
mp.cfg.DAG.UTXORLock()
defer mp.cfg.DAG.UTXORUnlock()
txID := tx.ID()
@ -830,7 +830,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, isNew, rateLimit, rejectDu
// don't exist or are already spent. Adding orphans to the orphan pool
// is not handled by this function, and the caller should use
// maybeAddOrphan if this behavior is desired.
var missingParents []*daghash.Hash
var missingParents []*daghash.TxID
var parentsInPool []*wire.OutPoint
for _, txIn := range tx.MsgTx().TxIn {
if _, ok := mp.mpUTXOSet.Get(txIn.PreviousOutPoint); !ok {
@ -1009,7 +1009,7 @@ func (mp *TxPool) maybeAcceptTransaction(tx *util.Tx, isNew, rateLimit, rejectDu
// be added to the orphan pool.
//
// This function is safe for concurrent access.
func (mp *TxPool) MaybeAcceptTransaction(tx *util.Tx, isNew, rateLimit bool) ([]*daghash.Hash, *TxDesc, error) {
func (mp *TxPool) MaybeAcceptTransaction(tx *util.Tx, isNew, rateLimit bool) ([]*daghash.TxID, *TxDesc, error) {
// Protect concurrent access.
mp.mtx.Lock()
hashes, txD, err := mp.maybeAcceptTransaction(tx, isNew, rateLimit, true)
@ -1209,9 +1209,9 @@ func (mp *TxPool) DepCount() int {
// pool.
//
// This function is safe for concurrent access.
func (mp *TxPool) TxIDs() []*daghash.Hash {
func (mp *TxPool) TxIDs() []*daghash.TxID {
mp.mtx.RLock()
ids := make([]*daghash.Hash, len(mp.pool))
ids := make([]*daghash.TxID, len(mp.pool))
i := 0
for txID := range mp.pool {
idCopy := txID
@ -1348,11 +1348,11 @@ func New(cfg *Config) *TxPool {
mpUTXO := blockdag.NewDiffUTXOSet(virtualUTXO, blockdag.NewUTXODiff())
return &TxPool{
cfg: *cfg,
pool: make(map[daghash.Hash]*TxDesc),
depends: make(map[daghash.Hash]*TxDesc),
dependsByPrev: make(map[wire.OutPoint]map[daghash.Hash]*TxDesc),
orphans: make(map[daghash.Hash]*orphanTx),
orphansByPrev: make(map[wire.OutPoint]map[daghash.Hash]*util.Tx),
pool: make(map[daghash.TxID]*TxDesc),
depends: make(map[daghash.TxID]*TxDesc),
dependsByPrev: make(map[wire.OutPoint]map[daghash.TxID]*TxDesc),
orphans: make(map[daghash.TxID]*orphanTx),
orphansByPrev: make(map[wire.OutPoint]map[daghash.TxID]*util.Tx),
nextExpireScan: time.Now().Add(orphanExpireScanInterval),
outpoints: make(map[wire.OutPoint]*util.Tx),
mpUTXOSet: mpUTXO,
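A minimal standalone sketch (not part of this commit) of why the pool maps can switch their key type from daghash.Hash to daghash.TxID with no behavioral change. It assumes, per the commit title, that TxID is a distinct named type with the same fixed-size 32-byte array layout as Hash, so it remains comparable and usable as a map key:

package main

import "fmt"

// Stand-ins for daghash.Hash and daghash.TxID; the real types are assumed
// to share the same underlying [32]byte layout.
type Hash [32]byte
type TxID Hash

type TxDesc struct{ Fee uint64 }

func main() {
	// Array-based named types are comparable, so TxID keys behave
	// exactly like the Hash keys they replace.
	pool := make(map[TxID]*TxDesc)

	var id TxID
	id[0] = 0x01
	pool[id] = &TxDesc{Fee: 1000}

	if desc, ok := pool[id]; ok {
		fmt.Println("found transaction, fee:", desc.Fee)
	}
}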
@ -133,7 +133,7 @@ func (p *poolHarness) CreateCoinbaseTx(blockHeight int32, numOutputs uint32) (*u
tx.AddTxIn(&wire.TxIn{
// Coinbase transactions have no inputs, so previous outpoint is
// zero hash and max index.
PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{},
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
wire.MaxPrevOutIndex),
SignatureScript: coinbaseScript,
Sequence: wire.MaxTxInSequenceNum,
@ -484,7 +484,7 @@ func TestProcessTransaction(t *testing.T) {

orphanedTx, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{}, Index: 1},
outPoint: wire.OutPoint{TxID: daghash.TxID{}, Index: 1},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -594,7 +594,7 @@ func TestProcessTransaction(t *testing.T) {
t.Fatalf("Script: error creating wrappedP2shNonSigScript: %v", err)
}

dummyPrevOutHash, err := daghash.NewHashFromStr("01")
dummyPrevOutHash, err := daghash.NewTxIDFromStr("01")
if err != nil {
t.Fatalf("NewShaHashFromStr: unexpected error: %v", err)
}
@ -782,7 +782,7 @@ func TestAddrIndex(t *testing.T) {
})
defer guard.Unpatch()
enteredRemoveUnconfirmedTx := false
guard = monkey.Patch((*indexers.AddrIndex).RemoveUnconfirmedTx, func(idx *indexers.AddrIndex, hash *daghash.Hash) {
guard = monkey.Patch((*indexers.AddrIndex).RemoveUnconfirmedTx, func(idx *indexers.AddrIndex, hash *daghash.TxID) {
enteredRemoveUnconfirmedTx = true
})
defer guard.Unpatch()
@ -897,7 +897,7 @@ func TestFetchTransaction(t *testing.T) {

orphanedTx, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{1}, Index: 1},
outPoint: wire.OutPoint{TxID: daghash.TxID{1}, Index: 1},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1060,7 +1060,7 @@ func TestOrphanExpiration(t *testing.T) {

expiredTx, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{}, Index: 0},
outPoint: wire.OutPoint{TxID: daghash.TxID{}, Index: 0},
}}, 1)
harness.txPool.ProcessTransaction(expiredTx, true,
false, 0)
@ -1068,7 +1068,7 @@ func TestOrphanExpiration(t *testing.T) {

tx1, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{1}, Index: 0},
outPoint: wire.OutPoint{TxID: daghash.TxID{1}, Index: 0},
}}, 1)
harness.txPool.ProcessTransaction(tx1, true,
false, 0)
@ -1083,7 +1083,7 @@ func TestOrphanExpiration(t *testing.T) {

tx2, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{2}, Index: 0},
outPoint: wire.OutPoint{TxID: daghash.TxID{2}, Index: 0},
}}, 1)
harness.txPool.ProcessTransaction(tx2, true,
false, 0)
@ -1106,7 +1106,7 @@ func TestMaxOrphanTxSize(t *testing.T) {

tx, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{}, Index: 0},
outPoint: wire.OutPoint{TxID: daghash.TxID{}, Index: 0},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1241,7 +1241,7 @@ func TestRemoveOrphansByTag(t *testing.T) {

orphanedTx1, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{1}, Index: 1},
outPoint: wire.OutPoint{TxID: daghash.TxID{1}, Index: 1},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1250,7 +1250,7 @@ func TestRemoveOrphansByTag(t *testing.T) {
false, 1)
orphanedTx2, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{2}, Index: 2},
outPoint: wire.OutPoint{TxID: daghash.TxID{2}, Index: 2},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1259,7 +1259,7 @@ func TestRemoveOrphansByTag(t *testing.T) {
false, 1)
orphanedTx3, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{3}, Index: 3},
outPoint: wire.OutPoint{TxID: daghash.TxID{3}, Index: 3},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1269,7 +1269,7 @@ func TestRemoveOrphansByTag(t *testing.T) {

orphanedTx4, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{4}, Index: 4},
outPoint: wire.OutPoint{TxID: daghash.TxID{4}, Index: 4},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1330,7 +1330,7 @@ func TestBasicOrphanRemoval(t *testing.T) {
// and ensure the state of all other orphans are unaffected.
nonChainedOrphanTx, err := harness.CreateSignedTx([]spendableOutpoint{{
amount: util.Amount(5000000000),
outPoint: wire.OutPoint{TxID: daghash.Hash{}, Index: 0},
outPoint: wire.OutPoint{TxID: daghash.TxID{}, Index: 0},
}}, 1)
if err != nil {
t.Fatalf("unable to create signed tx: %v", err)
@ -1740,7 +1740,7 @@ func TestHandleNewBlock(t *testing.T) {
}()

// process messages pushed by HandleNewBlock
blockTransnactions := make(map[daghash.Hash]int)
blockTransnactions := make(map[daghash.TxID]int)
for msg := range ch {
blockTransnactions[*msg.Tx.ID()] = 1
if *msg.Tx.ID() != *blockTx1.ID() {
@ -1811,7 +1811,7 @@ var dummyBlock = wire.MsgBlock{
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
@ -279,11 +279,11 @@ func TestDust(t *testing.T) {
// TestCheckTransactionStandard tests the checkTransactionStandard API.
func TestCheckTransactionStandard(t *testing.T) {
// Create some dummy, but otherwise standard, data for transactions.
prevOutHash, err := daghash.NewHashFromStr("01")
prevOutTxID, err := daghash.NewTxIDFromStr("01")
if err != nil {
t.Fatalf("NewShaHashFromStr: unexpected error: %v", err)
}
dummyPrevOut := wire.OutPoint{TxID: *prevOutHash, Index: 1}
dummyPrevOut := wire.OutPoint{TxID: *prevOutTxID, Index: 1}
dummySigScript := bytes.Repeat([]byte{0x00}, 65)
dummyTxIn := wire.TxIn{
PreviousOutPoint: dummyPrevOut,

@ -70,7 +70,7 @@ type TxSource interface {

// HaveTransaction returns whether or not the passed transaction hash
// exists in the source pool.
HaveTransaction(hash *daghash.Hash) bool
HaveTransaction(txID *daghash.TxID) bool
}

// txPrioItem houses a transaction along with extra information that allows the
@ -248,7 +248,7 @@ func createCoinbaseTx(params *dagconfig.Params, coinbaseScript []byte, nextBlock
tx.AddTxIn(&wire.TxIn{
// Coinbase transactions have no inputs, so previous outpoint is
// zero hash and max index.
PreviousOutPoint: *wire.NewOutPoint(&daghash.Hash{},
PreviousOutPoint: *wire.NewOutPoint(&daghash.TxID{},
wire.MaxPrevOutIndex),
SignatureScript: coinbaseScript,
Sequence: wire.MaxTxInSequenceNum,

@ -65,7 +65,7 @@ func createTxIn(originTx *wire.MsgTx, outputIndex uint32) *wire.TxIn {
prevOut = wire.NewOutPoint(&originTxID, 0)
} else {
prevOut = &wire.OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0xFFFFFFFF,
}
}
@ -41,9 +41,6 @@ const (
maxRequestedTxns = wire.MaxInvPerMsg
)

// zeroHash is the zero value hash (all zeros). It is defined as a convenience.
var zeroHash daghash.Hash

// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
peer *peerpkg.Peer
@ -135,7 +132,7 @@ type headerNode struct {
type peerSyncState struct {
syncCandidate bool
requestQueue []*wire.InvVect
requestedTxns map[daghash.Hash]struct{}
requestedTxns map[daghash.TxID]struct{}
requestedBlocks map[daghash.Hash]struct{}
}

@ -157,8 +154,8 @@ type SyncManager struct {
quit chan struct{}

// These fields should only be accessed from the blockHandler thread
rejectedTxns map[daghash.Hash]struct{}
requestedTxns map[daghash.Hash]struct{}
rejectedTxns map[daghash.TxID]struct{}
requestedTxns map[daghash.TxID]struct{}
requestedBlocks map[daghash.Hash]struct{}
syncPeer *peerpkg.Peer
peerStates map[*peerpkg.Peer]*peerSyncState
@ -293,7 +290,7 @@ func (sm *SyncManager) startSync() {
"%d from peer %s", sm.dag.Height()+1,
sm.nextCheckpoint.Height, bestPeer.Addr()) //TODO: (Ori) This is probably wrong. Done only for compilation
} else {
bestPeer.PushGetBlocksMsg(locator, &zeroHash)
bestPeer.PushGetBlocksMsg(locator, &daghash.ZeroHash)
}
sm.syncPeer = bestPeer
} else {
@ -346,7 +343,7 @@ func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) {
isSyncCandidate := sm.isSyncCandidate(peer)
sm.peerStates[peer] = &peerSyncState{
syncCandidate: isSyncCandidate,
requestedTxns: make(map[daghash.Hash]struct{}),
requestedTxns: make(map[daghash.TxID]struct{}),
requestedBlocks: make(map[daghash.Hash]struct{}),
}

@ -442,7 +439,7 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
// Do not request this transaction again until a new block
// has been processed.
sm.rejectedTxns[*txID] = struct{}{}
sm.limitMap(sm.rejectedTxns, maxRejectedTxns)
sm.limitTxIDMap(sm.rejectedTxns, maxRejectedTxns)

// When the error is a rule error, it means the transaction was
// simply rejected as opposed to something actually going wrong,
@ -459,7 +456,7 @@ func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
// Convert the error into an appropriate reject message and
// send it.
code, reason := mempool.ErrToRejectErr(err)
peer.PushRejectMsg(wire.CmdTx, code, reason, txID, false)
peer.PushRejectMsg(wire.CmdTx, code, reason, (*daghash.Hash)(txID), false)
return
}

@ -619,7 +616,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
blkHashUpdate = &highestTipHash

// Clear the rejected transactions.
sm.rejectedTxns = make(map[daghash.Hash]struct{})
sm.rejectedTxns = make(map[daghash.TxID]struct{})
}

// Update the block height for this peer. But only send a message to
@ -678,7 +675,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
sm.headerList.Init()
log.Infof("Reached the final checkpoint -- switching to normal mode")
locator := blockdag.BlockLocator([]*daghash.Hash{blockHash})
err = peer.PushGetBlocksMsg(locator, &zeroHash)
err = peer.PushGetBlocksMsg(locator, &daghash.ZeroHash)
if err != nil {
log.Warnf("Failed to send getblocks message to peer %s: %v",
peer.Addr(), err)
@ -856,7 +853,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
case wire.InvTypeTx:
// Ask the transaction memory pool if the transaction is known
// to it in any form (main pool or orphan).
if sm.txMemPool.HaveTransaction(&invVect.Hash) {
if sm.txMemPool.HaveTransaction((*daghash.TxID)(&invVect.Hash)) {
return true, nil
}

@ -868,7 +865,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
// checked because the vast majority of transactions consist of
// two outputs where one is some form of "pay-to-somebody-else"
// and the other is a change output.
prevOut := wire.OutPoint{TxID: invVect.Hash}
prevOut := wire.OutPoint{TxID: daghash.TxID(invVect.Hash)}
for i := uint32(0); i < 2; i++ {
prevOut.Index = i
entry, ok := sm.dag.GetUTXOEntry(prevOut)
@ -961,7 +958,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
if iv.Type == wire.InvTypeTx {
// Skip the transaction if it has already been
// rejected.
if _, exists := sm.rejectedTxns[iv.Hash]; exists {
if _, exists := sm.rejectedTxns[daghash.TxID(iv.Hash)]; exists {
continue
}
}
@ -1007,7 +1004,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
// final one the remote peer knows about (zero
// stop hash).
locator := sm.dag.BlockLocatorFromHash(&iv.Hash)
peer.PushGetBlocksMsg(locator, &zeroHash)
peer.PushGetBlocksMsg(locator, &daghash.ZeroHash)
}
}
}
@ -1028,7 +1025,7 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
// request.
if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
sm.requestedBlocks[iv.Hash] = struct{}{}
sm.limitMap(sm.requestedBlocks, maxRequestedBlocks)
sm.limitHashMap(sm.requestedBlocks, maxRequestedBlocks)
state.requestedBlocks[iv.Hash] = struct{}{}

gdmsg.AddInvVect(iv)
@ -1038,10 +1035,10 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
case wire.InvTypeTx:
// Request the transaction if there is not already a
// pending request.
if _, exists := sm.requestedTxns[iv.Hash]; !exists {
sm.requestedTxns[iv.Hash] = struct{}{}
sm.limitMap(sm.requestedTxns, maxRequestedTxns)
state.requestedTxns[iv.Hash] = struct{}{}
if _, exists := sm.requestedTxns[daghash.TxID(iv.Hash)]; !exists {
sm.requestedTxns[daghash.TxID(iv.Hash)] = struct{}{}
sm.limitTxIDMap(sm.requestedTxns, maxRequestedTxns)
state.requestedTxns[daghash.TxID(iv.Hash)] = struct{}{}

gdmsg.AddInvVect(iv)
numRequested++
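The hunk above converts the inventory hash to the new key type with a plain value conversion, daghash.TxID(iv.Hash). A standalone sketch with local stand-in types (not the real daghash package) of why that conversion is legal and cheap, since both named types share the same underlying array:

package main

import "fmt"

type Hash [32]byte
type TxID Hash // same underlying array type, so value conversions are allowed

func main() {
	requestedTxns := make(map[TxID]struct{})

	var invHash Hash
	invHash[0] = 0xab

	// Converting copies the 32 bytes but performs no hashing or allocation.
	id := TxID(invHash)
	if _, exists := requestedTxns[id]; !exists {
		requestedTxns[id] = struct{}{}
	}
	fmt.Println("pending requests:", len(requestedTxns))
}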
@ -1058,10 +1055,10 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
}
}

// limitMap is a helper function for maps that require a maximum limit by
// limitTxIDMap is a helper function for maps that require a maximum limit by
// evicting a random transaction if adding a new value would cause it to
// overflow the maximum allowed.
func (sm *SyncManager) limitMap(m map[daghash.Hash]struct{}, limit int) {
func (sm *SyncManager) limitTxIDMap(m map[daghash.TxID]struct{}, limit int) {
if len(m)+1 > limit {
// Remove a random entry from the map. For most compilers, Go's
// range statement iterates starting at a random item although
@ -1069,8 +1066,26 @@ func (sm *SyncManager) limitMap(m map[daghash.Hash]struct{}, limit int) {
// is not important here because an adversary would have to be
// able to pull off preimage attacks on the hashing function in
// order to target eviction of specific entries anyways.
for txHash := range m {
delete(m, txHash)
for txID := range m {
delete(m, txID)
return
}
}
}

// limitHashMap is a helper function for maps that require a maximum limit by
// evicting a random item if adding a new value would cause it to
// overflow the maximum allowed.
func (sm *SyncManager) limitHashMap(m map[daghash.Hash]struct{}, limit int) {
if len(m)+1 > limit {
// Remove a random entry from the map. For most compilers, Go's
// range statement iterates starting at a random item although
// that is not 100% guaranteed by the spec. The iteration order
// is not important here because an adversary would have to be
// able to pull off preimage attacks on the hashing function in
// order to target eviction of specific entries anyways.
for hash := range m {
delete(m, hash)
return
}
}
}
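limitTxIDMap and limitHashMap above are the same random-eviction routine specialized for the two key types. A standalone sketch of the shared technique, relying on Go's unspecified (in practice randomized) map iteration order to drop an arbitrary entry once the limit would be exceeded; the key type here is a local stand-in, not the real daghash type:

package main

import "fmt"

type TxID [32]byte

// limitMap evicts one arbitrary entry when adding another element would
// exceed limit. The first ranged key is effectively a random victim.
func limitMap(m map[TxID]struct{}, limit int) {
	if len(m)+1 > limit {
		for victim := range m {
			delete(m, victim)
			return
		}
	}
}

func main() {
	m := make(map[TxID]struct{})
	for i := 0; i < 5; i++ {
		limitMap(m, 3) // make room before inserting
		m[TxID{byte(i)}] = struct{}{}
	}
	fmt.Println("size capped at:", len(m)) // 3
}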
@ -1349,8 +1364,8 @@ func New(config *Config) (*SyncManager, error) {
dag: config.DAG,
txMemPool: config.TxMemPool,
chainParams: config.ChainParams,
rejectedTxns: make(map[daghash.Hash]struct{}),
requestedTxns: make(map[daghash.Hash]struct{}),
rejectedTxns: make(map[daghash.TxID]struct{}),
requestedTxns: make(map[daghash.TxID]struct{}),
requestedBlocks: make(map[daghash.Hash]struct{}),
peerStates: make(map[*peerpkg.Peer]*peerSyncState),
progressLogger: newBlockProgressLogger("Processed", log),

@ -77,10 +77,6 @@ var (
// and is used to assign an id to a peer.
nodeCount int32

// zeroHash is the zero value hash (all zeros). It is defined as a
// convenience.
zeroHash daghash.Hash

// sentNonces houses the unique nonces that are generated when pushing
// version messages that are used to detect self connections.
sentNonces = newMruNonceMap(50)
@ -613,6 +609,7 @@ func (p *Peer) UserAgent() string {
return userAgent
}

// SubnetworkID returns peer subnetwork ID
func (p *Peer) SubnetworkID() *subnetworkid.SubnetworkID {
p.flagsMtx.Lock()
subnetworkID := p.cfg.SubnetworkID
@ -1012,7 +1009,7 @@ func (p *Peer) PushRejectMsg(command string, code wire.RejectCode, reason string
log.Warnf("Sending a reject message for command "+
"type %v which should have specified a hash "+
"but does not", command)
hash = &zeroHash
hash = &daghash.ZeroHash
}
msg.Hash = *hash
}

@ -940,7 +940,7 @@ func (c *Client) notifySpentInternal(outpoints []btcjson.OutPoint) FutureNotifyS
// outpoint from the wire type.
func newOutPointFromWire(op *wire.OutPoint) btcjson.OutPoint {
return btcjson.OutPoint{
Hash: op.TxID.String(),
TxID: op.TxID.String(),
Index: op.Index,
}
}
@ -1173,7 +1173,7 @@ func (c *Client) LoadTxFilterAsync(reload bool, addresses []util.Address,
outPointObjects := make([]btcjson.OutPoint, len(outPoints))
for i := range outPoints {
outPointObjects[i] = btcjson.OutPoint{
Hash: outPoints[i].TxID.String(),
TxID: outPoints[i].TxID.String(),
Index: outPoints[i].Index,
}
}

@ -386,11 +386,11 @@ func (r FutureListLockUnspentResult) Receive() ([]*wire.OutPoint, error) {
// Create a slice of outpoints from the transaction input structs.
ops := make([]*wire.OutPoint, len(inputs))
for i, input := range inputs {
sha, err := daghash.NewHashFromStr(input.TxID)
txID, err := daghash.NewTxIDFromStr(input.TxID)
if err != nil {
return nil, err
}
ops[i] = wire.NewOutPoint(sha, input.Vout)
ops[i] = wire.NewOutPoint(txID, input.Vout)
}

return ops, nil

@ -69,9 +69,6 @@ var (
userAgentVersion = fmt.Sprintf("%d.%d.%d", version.AppMajor, version.AppMinor, version.AppPatch)
)

// zeroHash is the zero value hash (all zeros). It is defined as a convenience.
var zeroHash daghash.Hash

// onionAddr implements the net.Addr interface and represents a tor address.
type onionAddr struct {
addr string
@ -474,7 +471,7 @@ func (sp *Peer) OnMemPool(_ *peer.Peer, msg *wire.MsgMemPool) {
// or only the transactions that match the filter when there is
// one.
if !sp.filter.IsLoaded() || sp.filter.MatchTxAndUpdate(txDesc.Tx) {
iv := wire.NewInvVect(wire.InvTypeTx, txDesc.Tx.ID())
iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txDesc.Tx.ID()))
invMsg.AddInvVect(iv)
if len(invMsg.InvList)+1 > wire.MaxInvPerMsg {
break
@ -503,7 +500,7 @@ func (sp *Peer) OnTx(_ *peer.Peer, msg *wire.MsgTx) {
// Convert the raw MsgTx to a util.Tx which provides some convenience
// methods and things such as hash caching.
tx := util.NewTx(msg)
iv := wire.NewInvVect(wire.InvTypeTx, tx.ID())
iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(tx.ID()))
sp.AddKnownInventory(iv)

// Queue the transaction up to be handled by the sync manager and
@ -619,7 +616,7 @@ func (sp *Peer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) {
var err error
switch iv.Type {
case wire.InvTypeTx:
err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan)
err = sp.server.pushTxMsg(sp, (*daghash.TxID)(&iv.Hash), c, waitChan)
case wire.InvTypeBlock:
err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan)
case wire.InvTypeFilteredBlock:
@ -1217,23 +1214,23 @@ func (s *Server) RemoveRebroadcastInventory(iv *wire.InvVect) {
// passed transactions to all connected peers.
func (s *Server) RelayTransactions(txns []*mempool.TxDesc) {
for _, txD := range txns {
iv := wire.NewInvVect(wire.InvTypeTx, txD.Tx.ID())
iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txD.Tx.ID()))
s.RelayInventory(iv, txD)
}
}

// pushTxMsg sends a tx message for the provided transaction hash to the
// connected peer. An error is returned if the transaction hash is not known.
func (s *Server) pushTxMsg(sp *Peer, hash *daghash.Hash, doneChan chan<- struct{},
func (s *Server) pushTxMsg(sp *Peer, txID *daghash.TxID, doneChan chan<- struct{},
waitChan <-chan struct{}) error {

// Attempt to fetch the requested transaction from the pool. A
// call could be made to check for existence first, but simply trying
// to fetch a missing transaction results in the same behavior.
tx, err := s.TxMemPool.FetchTransaction(hash)
tx, err := s.TxMemPool.FetchTransaction(txID)
if err != nil {
peerLog.Tracef("Unable to fetch tx %v from transaction "+
"pool: %v", hash, err)
"pool: %v", txID, err)

if doneChan != nil {
doneChan <- struct{}{}
@ -2862,6 +2859,6 @@ func (s *Server) TransactionConfirmed(tx *util.Tx) {
return
}

iv := wire.NewInvVect(wire.InvTypeTx, tx.ID())
iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(tx.ID()))
s.RemoveRebroadcastInventory(iv)
}
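pushTxMsg above is the typical consumer of the mempool's TxID-keyed lookup: fetch by ID and treat an error as "not in the pool". A hedged sketch of that pattern against a fake pool; only the FetchTransaction(txID) shape shown in this diff is assumed, everything else is a stand-in:

package main

import (
	"errors"
	"fmt"
)

type TxID [32]byte
type Tx struct{ ID TxID }

// fakePool stands in for mempool.TxPool in this sketch.
type fakePool struct{ txs map[TxID]*Tx }

func (p *fakePool) FetchTransaction(txID *TxID) (*Tx, error) {
	if tx, ok := p.txs[*txID]; ok {
		return tx, nil
	}
	return nil, errors.New("transaction is not in the pool")
}

func main() {
	known := TxID{0x01}
	pool := &fakePool{txs: map[TxID]*Tx{known: {ID: known}}}

	// Mirrors pushTxMsg: a failed fetch just means "unknown", not fatal.
	for _, id := range []TxID{known, {0x02}} {
		if tx, err := pool.FetchTransaction(&id); err != nil {
			fmt.Printf("tx %x not in mempool: %v\n", id[:2], err)
		} else {
			fmt.Printf("relaying tx %x\n", tx.ID[:2])
		}
	}
}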
@ -326,10 +326,10 @@ func rpcDecodeHexError(gotHex string) *btcjson.RPCError {
// rpcNoTxInfoError is a convenience function for returning a nicely formatted
// RPC error which indicates there is no information available for the provided
// transaction hash.
func rpcNoTxInfoError(txHash *daghash.Hash) *btcjson.RPCError {
func rpcNoTxInfoError(txID *daghash.TxID) *btcjson.RPCError {
return btcjson.NewRPCError(btcjson.ErrRPCNoTxInfo,
fmt.Sprintf("No information available about transaction %v",
txHash))
txID))
}

// gbtWorkState houses state that is used in between multiple RPC invocations to
@ -545,12 +545,12 @@ func handleCreateRawTransaction(s *Server, cmd interface{}, closeChan <-chan str
// some validity checks.
mtx := wire.NewMsgTx(wire.TxVersion)
for _, input := range c.Inputs {
txHash, err := daghash.NewHashFromStr(input.TxID)
txID, err := daghash.NewTxIDFromStr(input.TxID)
if err != nil {
return nil, rpcDecodeHexError(input.TxID)
}

prevOut := wire.NewOutPoint(txHash, input.Vout)
prevOut := wire.NewOutPoint(txID, input.Vout)
txIn := wire.NewTxIn(prevOut, []byte{})
if c.LockTime != nil && *c.LockTime != 0 {
txIn.Sequence = wire.MaxTxInSequenceNum - 1
@ -1679,7 +1679,7 @@ func (state *gbtWorkState) blockTemplateResult(useCoinbaseValue bool, submitOld
// the adjustments to the various lengths and indices.
numTx := len(msgBlock.Transactions)
transactions := make([]btcjson.GetBlockTemplateResultTx, 0, numTx-1)
txIndex := make(map[daghash.Hash]int64, numTx)
txIndex := make(map[daghash.TxID]int64, numTx)
for i, tx := range msgBlock.Transactions {
txID := tx.TxID()
txIndex[txID] = int64(i)
@ -2456,7 +2456,7 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
c := cmd.(*btcjson.GetRawTransactionCmd)

// Convert the provided transaction hash hex to a Hash.
txHash, err := daghash.NewHashFromStr(c.TxID)
txID, err := daghash.NewTxIDFromStr(c.TxID)
if err != nil {
return nil, rpcDecodeHexError(c.TxID)
}
@ -2471,7 +2471,7 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
var mtx *wire.MsgTx
var blkHash *daghash.Hash
var blkHeight int32
tx, err := s.cfg.TxMemPool.FetchTransaction(txHash)
tx, err := s.cfg.TxMemPool.FetchTransaction(txID)
if err != nil {
if s.cfg.TxIndex == nil {
return nil, &btcjson.RPCError{
@ -2483,13 +2483,13 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
}

// Look up the location of the transaction.
blockRegion, err := s.cfg.TxIndex.TxFirstBlockRegion(txHash)
blockRegion, err := s.cfg.TxIndex.TxFirstBlockRegion(txID)
if err != nil {
context := "Failed to retrieve transaction location"
return nil, internalRPCError(err.Error(), context)
}
if blockRegion == nil {
return nil, rpcNoTxInfoError(txHash)
return nil, rpcNoTxInfoError(txID)
}

// Load the raw transaction bytes from the database.
@ -2500,7 +2500,7 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
return err
})
if err != nil {
return nil, rpcNoTxInfoError(txHash)
return nil, rpcNoTxInfoError(txID)
}

// When the verbose flag isn't set, simply return the serialized
@ -2562,7 +2562,7 @@ func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct
dagHeight = s.cfg.DAG.Height()
}

rawTxn, err := createTxRawResult(s.cfg.DAGParams, mtx, txHash.String(),
rawTxn, err := createTxRawResult(s.cfg.DAGParams, mtx, txID.String(),
blkHeader, blkHashStr, blkHeight, dagHeight, nil)
if err != nil {
return nil, err
@ -2575,7 +2575,7 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
c := cmd.(*btcjson.GetTxOutCmd)

// Convert the provided transaction hash hex to a Hash.
txHash, err := daghash.NewHashFromStr(c.TxID)
txID, err := daghash.NewTxIDFromStr(c.TxID)
if err != nil {
return nil, rpcDecodeHexError(c.TxID)
}
@ -2593,10 +2593,10 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
}
// TODO: This is racy. It should attempt to fetch it directly and check
// the error.
if includeMempool && s.cfg.TxMemPool.HaveTransaction(txHash) {
tx, err := s.cfg.TxMemPool.FetchTransaction(txHash)
if includeMempool && s.cfg.TxMemPool.HaveTransaction(txID) {
tx, err := s.cfg.TxMemPool.FetchTransaction(txID)
if err != nil {
return nil, rpcNoTxInfoError(txHash)
return nil, rpcNoTxInfoError(txID)
}

mtx := tx.MsgTx()
@ -2611,7 +2611,7 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
txOut := mtx.TxOut[c.Vout]
if txOut == nil {
errStr := fmt.Sprintf("Output index: %d for txid: %s "+
"does not exist", c.Vout, txHash)
"does not exist", c.Vout, txID)
return nil, internalRPCError(errStr, "")
}

@ -2621,10 +2621,10 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
pkScript = txOut.PkScript
isCoinbase = mtx.IsCoinBase()
} else {
out := wire.OutPoint{TxID: *txHash, Index: c.Vout}
out := wire.OutPoint{TxID: *txID, Index: c.Vout}
entry, ok := s.cfg.DAG.GetUTXOEntry(out)
if !ok {
return nil, rpcNoTxInfoError(txHash)
return nil, rpcNoTxInfoError(txID)
}

// To match the behavior of the reference client, return nil
@ -3280,7 +3280,7 @@ func handleSendRawTransaction(s *Server, cmd interface{}, closeChan <-chan struc
// Keep track of all the sendRawTransaction request txns so that they
// can be rebroadcast if they don't make their way into a block.
txD := acceptedTxs[0]
iv := wire.NewInvVect(wire.InvTypeTx, txD.Tx.ID())
iv := wire.NewInvVect(wire.InvTypeTx, (*daghash.Hash)(txD.Tx.ID()))
s.cfg.ConnMgr.AddRebroadcastInventory(iv, txD)

return tx.ID().String(), nil
@ -618,7 +618,7 @@ var helpDescsEnUS = map[string]string{
"stopNotifyReceived-addresses": "List of address to cancel receive notifications for",

// OutPoint help.
"outPoint-hash": "The hex-encoded bytes of the outPoint hash",
"outPoint-txid": "The hex-encoded bytes of the outPoint transaction ID",
"outPoint-index": "The index of the outPoint",

// NotifySpentCmd help.

@ -1824,7 +1824,7 @@ func handleLoadTxFilter(wsc *wsClient, icmd interface{}) (interface{}, error) {

outPoints := make([]wire.OutPoint, len(cmd.OutPoints))
for i := range cmd.OutPoints {
hash, err := daghash.NewHashFromStr(cmd.OutPoints[i].Hash)
txID, err := daghash.NewTxIDFromStr(cmd.OutPoints[i].TxID)
if err != nil {
return nil, &btcjson.RPCError{
Code: btcjson.ErrRPCInvalidParameter,
@ -1832,7 +1832,7 @@ func handleLoadTxFilter(wsc *wsClient, icmd interface{}) (interface{}, error) {
}
}
outPoints[i] = wire.OutPoint{
TxID: *hash,
TxID: *txID,
Index: cmd.OutPoints[i].Index,
}
}
@ -2044,12 +2044,12 @@ func checkAddressValidity(addrs []string, params *dagconfig.Params) error {
func deserializeOutpoints(serializedOuts []btcjson.OutPoint) ([]*wire.OutPoint, error) {
outpoints := make([]*wire.OutPoint, 0, len(serializedOuts))
for i := range serializedOuts {
blockHash, err := daghash.NewHashFromStr(serializedOuts[i].Hash)
txID, err := daghash.NewTxIDFromStr(serializedOuts[i].TxID)
if err != nil {
return nil, rpcDecodeHexError(serializedOuts[i].Hash)
return nil, rpcDecodeHexError(serializedOuts[i].TxID)
}
index := serializedOuts[i].Index
outpoints = append(outpoints, wire.NewOutPoint(blockHash, index))
outpoints = append(outpoints, wire.NewOutPoint(txID, index))
}

return outpoints, nil
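deserializeOutpoints above now goes through daghash.NewTxIDFromStr and wire.NewOutPoint. A hedged sketch of that parse-and-build flow using the function names as they appear in this diff; the import paths and the example hex value are assumptions for illustration only and may not match the actual tree:

package main

import (
	"fmt"

	// Assumed import paths; verify against the repository before use.
	"github.com/kaspanet/kaspad/dagconfig/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// A hex-encoded transaction ID as it would arrive in a JSON-RPC request.
	const txIDHex = "90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"

	txID, err := daghash.NewTxIDFromStr(txIDHex)
	if err != nil {
		fmt.Println("invalid transaction ID:", err)
		return
	}

	// Build the outpoint the same way deserializeOutpoints does.
	outpoint := wire.NewOutPoint(txID, 0)
	fmt.Println("outpoint:", outpoint) // prints the "txID:index" form
}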
@ -30,7 +30,7 @@ func TestBadPC(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{
TxID: daghash.TxID([32]byte{
0xc9, 0x97, 0xa5, 0xe5,
0x6e, 0x10, 0x41, 0x02,
0xfa, 0x20, 0x9c, 0x6a,
@ -105,7 +105,7 @@ func TestCheckErrorCondition(t *testing.T) {
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{
TxID: daghash.TxID([32]byte{
0xc9, 0x97, 0xa5, 0xe5,
0x6e, 0x10, 0x41, 0x02,
0xfa, 0x20, 0x9c, 0x6a,
@ -402,7 +402,7 @@ func TestDisasmPC(t *testing.T) {
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{
TxID: daghash.TxID([32]byte{
0xc9, 0x97, 0xa5, 0xe5,
0x6e, 0x10, 0x41, 0x02,
0xfa, 0x20, 0x9c, 0x6a,
@ -464,7 +464,7 @@ func TestDisasmScript(t *testing.T) {
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{
TxID: daghash.TxID([32]byte{
0xc9, 0x97, 0xa5, 0xe5,
0x6e, 0x10, 0x41, 0x02,
0xfa, 0x20, 0x9c, 0x6a,

@ -101,7 +101,7 @@ func ExampleSignTxOutput() {
// would ordinarily be the real transaction that is being spent. It
// contains a single output that pays to address in the amount of 1 BTC.
originTx := wire.NewMsgTx(wire.TxVersion)
prevOut := wire.NewOutPoint(&daghash.Hash{}, ^uint32(0))
prevOut := wire.NewOutPoint(&daghash.TxID{}, ^uint32(0))
txIn := wire.NewTxIn(prevOut, []byte{txscript.Op0, txscript.Op0})
originTx.AddTxIn(txIn)
pkScript, err := txscript.PayToAddrScript(addr)

@ -217,7 +217,7 @@ func parseExpectedResult(expected string) ([]ErrorCode, error) {
func createSpendingTx(sigScript, pkScript []byte) *wire.MsgTx {
coinbaseTx := wire.NewMsgTx(wire.TxVersion)

outPoint := wire.NewOutPoint(&daghash.Hash{}, ^uint32(0))
outPoint := wire.NewOutPoint(&daghash.TxID{}, ^uint32(0))
txIn := wire.NewTxIn(outPoint, []byte{Op0, Op0})
txOut := wire.NewTxOut(0, pkScript)
coinbaseTx.AddTxIn(txIn)

@ -104,21 +104,21 @@ func TestSignTxOutput(t *testing.T) {
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0,
},
Sequence: 4294967295,
},
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 1,
},
Sequence: 4294967295,
},
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 2,
},
Sequence: 4294967295,
},

@ -334,7 +334,7 @@ var Block100000 = wire.MsgBlock{
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{
@ -369,7 +369,7 @@ var Block100000 = wire.MsgBlock{
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{ // Make go vet happy.
TxID: daghash.TxID([32]byte{ // Make go vet happy.
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
@ -439,7 +439,7 @@ var Block100000 = wire.MsgBlock{
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{ // Make go vet happy.
TxID: daghash.TxID([32]byte{ // Make go vet happy.
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
@ -508,7 +508,7 @@ var Block100000 = wire.MsgBlock{
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
TxID: daghash.Hash([32]byte{ // Make go vet happy.
TxID: daghash.TxID([32]byte{ // Make go vet happy.
0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73,
0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac,
0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90,
@ -250,15 +250,15 @@ func (bf *Filter) AddOutPoint(outpoint *wire.OutPoint) {
// script.
//
// This function MUST be called with the filter lock held.
func (bf *Filter) maybeAddOutpoint(pkScript []byte, outHash *daghash.Hash, outIdx uint32) {
func (bf *Filter) maybeAddOutpoint(pkScript []byte, outTxID *daghash.TxID, outIdx uint32) {
switch bf.msgFilterLoad.Flags {
case wire.BloomUpdateAll:
outpoint := wire.NewOutPoint(outHash, outIdx)
outpoint := wire.NewOutPoint(outTxID, outIdx)
bf.addOutPoint(outpoint)
case wire.BloomUpdateP2PubkeyOnly:
class := txscript.GetScriptClass(pkScript)
if class == txscript.PubKeyTy || class == txscript.MultiSigTy {
outpoint := wire.NewOutPoint(outHash, outIdx)
outpoint := wire.NewOutPoint(outTxID, outIdx)
bf.addOutPoint(outpoint)
}
}

@ -414,12 +414,12 @@ func TestFilterBloomMatch(t *testing.T) {

f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
inputStr = "90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"
hash, err = daghash.NewHashFromStr(inputStr)
txID, err := daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
return
}
outpoint := wire.NewOutPoint(hash, 0)
outpoint := wire.NewOutPoint(txID, 0)
f.AddOutPoint(outpoint)
if !f.MatchTxAndUpdate(tx) {
t.Errorf("TestFilterBloomMatch didn't match outpoint %s", inputStr)
@ -451,12 +451,12 @@ func TestFilterBloomMatch(t *testing.T) {

f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
inputStr = "90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"
hash, err = daghash.NewHashFromStr(inputStr)
txID, err = daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
return
}
outpoint = wire.NewOutPoint(hash, 1)
outpoint = wire.NewOutPoint(txID, 1)
f.AddOutPoint(outpoint)
if f.MatchTxAndUpdate(tx) {
t.Errorf("TestFilterBloomMatch matched outpoint %s", inputStr)
@ -464,12 +464,12 @@ func TestFilterBloomMatch(t *testing.T) {

f = bloom.NewFilter(10, 0, 0.000001, wire.BloomUpdateAll)
inputStr = "000000d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"
hash, err = daghash.NewHashFromStr(inputStr)
txID, err = daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestFilterBloomMatch NewHashFromStr failed: %v\n", err)
return
}
outpoint = wire.NewOutPoint(hash, 0)
outpoint = wire.NewOutPoint(txID, 0)
f.AddOutPoint(outpoint)
if f.MatchTxAndUpdate(tx) {
t.Errorf("TestFilterBloomMatch matched outpoint %s", inputStr)
@ -500,12 +500,12 @@ func TestFilterInsertUpdateNone(t *testing.T) {
f.Add(inputBytes)

inputStr = "147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"
hash, err := daghash.NewHashFromStr(inputStr)
txID, err := daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestFilterInsertUpdateNone NewHashFromStr failed: %v", err)
return
}
outpoint := wire.NewOutPoint(hash, 0)
outpoint := wire.NewOutPoint(txID, 0)

if f.MatchesOutPoint(outpoint) {
t.Errorf("TestFilterInsertUpdateNone matched outpoint %s", inputStr)
@ -513,12 +513,12 @@ func TestFilterInsertUpdateNone(t *testing.T) {
}

inputStr = "02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"
hash, err = daghash.NewHashFromStr(inputStr)
txID, err = daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestFilterInsertUpdateNone NewHashFromStr failed: %v", err)
return
}
outpoint = wire.NewOutPoint(hash, 0)
outpoint = wire.NewOutPoint(txID, 0)

if f.MatchesOutPoint(outpoint) {
t.Errorf("TestFilterInsertUpdateNone matched outpoint %s", inputStr)
@ -826,12 +826,12 @@ func TestFilterInsertP2PubKeyOnly(t *testing.T) {

// We should match the generation pubkey
inputStr = "042aaac8c54b07f1e729e01d38b1fb26c6d595ed5920b856faf4070db79ce933" //0st tx hash
hash, err := daghash.NewHashFromStr(inputStr)
txID, err := daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestMerkleBlockP2PubKeyOnly NewHashFromStr failed: %v", err)
return
}
outpoint := wire.NewOutPoint(hash, 0)
outpoint := wire.NewOutPoint(txID, 0)
if !f.MatchesOutPoint(outpoint) {
t.Errorf("TestMerkleBlockP2PubKeyOnly didn't match the generation "+
"outpoint %s", inputStr)
@ -840,12 +840,12 @@ func TestFilterInsertP2PubKeyOnly(t *testing.T) {

// We should not match the 4th transaction, which is not p2pk
inputStr = "f9a116ecc107b6b1b0bdcd0d727bfaa3355f27f8fed08347bf0004244949d9eb"
hash, err = daghash.NewHashFromStr(inputStr)
txID, err = daghash.NewTxIDFromStr(inputStr)
if err != nil {
t.Errorf("TestMerkleBlockP2PubKeyOnly NewHashFromStr failed: %v", err)
return
}
outpoint = wire.NewOutPoint(hash, 0)
outpoint = wire.NewOutPoint(txID, 0)
if f.MatchesOutPoint(outpoint) {
t.Errorf("TestMerkleBlockP2PubKeyOnly matched outpoint %s", inputStr)
return

@ -96,7 +96,7 @@ func NewMerkleBlock(block *util.Block, filter *Filter) (*wire.MsgMerkleBlock, []
} else {
mBlock.matchedBits = append(mBlock.matchedBits, 0x00)
}
mBlock.allHashes = append(mBlock.allHashes, tx.ID())
mBlock.allHashes = append(mBlock.allHashes, (*daghash.Hash)(tx.ID()))
}

// Calculate the number of merkle branches (height) in the tree.
@ -17,7 +17,7 @@ import (
// Coin represents a spendable transaction outpoint
type Coin interface {
Hash() *daghash.Hash
ID() *daghash.Hash
ID() *daghash.TxID
Index() uint32
Value() util.Amount
PkScript() []byte
@ -361,7 +361,7 @@ func (c *SimpleCoin) Hash() *daghash.Hash {
}

// ID returns the ID of the transaction on which the Coin is an output
func (c *SimpleCoin) ID() *daghash.Hash {
func (c *SimpleCoin) ID() *daghash.TxID {
return c.Tx.ID()
}

@ -19,14 +19,14 @@ import (

type TestCoin struct {
TxHash *daghash.Hash
TxID *daghash.Hash
TxID *daghash.TxID
TxIndex uint32
TxValue util.Amount
TxNumConfs int64
}

func (c *TestCoin) Hash() *daghash.Hash { return c.TxHash }
func (c *TestCoin) ID() *daghash.Hash { return c.TxID }
func (c *TestCoin) ID() *daghash.TxID { return c.TxID }
func (c *TestCoin) Index() uint32 { return c.TxIndex }
func (c *TestCoin) Value() util.Amount { return c.TxValue }
func (c *TestCoin) PkScript() []byte { return nil }
@ -37,7 +37,7 @@ func NewCoin(index int64, value util.Amount, numConfs int64) coinset.Coin {
h := sha256.New()
h.Write([]byte(fmt.Sprintf("%d", index)))
hash, _ := daghash.NewHash(h.Sum(nil))
id, _ := daghash.NewHash(h.Sum(nil))
id, _ := daghash.NewTxID(h.Sum(nil))
c := &TestCoin{
TxHash: hash,
TxID: id,
@ -119,7 +119,7 @@ func TestCoinSet(t *testing.T) {
t.Errorf("Expected only 1 TxIn, got %d", len(mtx.TxIn))
}
op := mtx.TxIn[0].PreviousOutPoint
if !op.TxID.IsEqual(coins[1].Hash()) || op.Index != coins[1].Index() {
if !op.TxID.IsEqual(coins[1].ID()) || op.Index != coins[1].Index() {
t.Errorf("Expected the second coin to be added as input to mtx")
}
}

@ -102,6 +102,11 @@ func (b *GCSBuilder) SetKeyFromHash(keyHash *daghash.Hash) *GCSBuilder {
return b.SetKey(DeriveKey(keyHash))
}

// SetKeyFromTxID is wrapper of SetKeyFromHash for TxID
func (b *GCSBuilder) SetKeyFromTxID(keyTxID *daghash.TxID) *GCSBuilder {
return b.SetKeyFromHash((*daghash.Hash)(keyTxID))
}

// SetP sets the filter's probability after calling Builder().
func (b *GCSBuilder) SetP(p uint8) *GCSBuilder {
// Do nothing if the builder's already errored out.
@ -183,6 +188,12 @@ func (b *GCSBuilder) AddHash(hash *daghash.Hash) *GCSBuilder {
return b.AddEntry(hash.CloneBytes())
}

// AddTxID adds a daghash.TxID to the list of entries to be included in the
// GCS filter when it's built.
func (b *GCSBuilder) AddTxID(txID *daghash.TxID) *GCSBuilder {
return b.AddHash((*daghash.Hash)(txID))
}

// AddScript adds all the data pushed in the script serialized as the passed
// []byte to the list of entries to be included in the GCS filter when it's
// built.
@ -251,6 +262,11 @@ func WithKeyHashP(keyHash *daghash.Hash, p uint8) *GCSBuilder {
return WithKeyHashPN(keyHash, p, 0)
}

// WithKeyTxIDP is wrapper of WithKeyHashP for TxID
func WithKeyTxIDP(keyTxID *daghash.TxID, p uint8) *GCSBuilder {
return WithKeyHashP((*daghash.Hash)(keyTxID), p)
}

// WithKeyHash creates a GCSBuilder with key derived from the specified
// daghash.Hash. Probability is set to 20 (2^-20 collision probability).
// Estimated filter size is set to zero, which means more reallocations are
@ -259,6 +275,11 @@ func WithKeyHash(keyHash *daghash.Hash) *GCSBuilder {
return WithKeyHashPN(keyHash, DefaultP, 0)
}

// WithKeyTxID is wrapper of WithKeyHash for transaction ID
func WithKeyTxID(keyTxID *daghash.TxID) *GCSBuilder {
return WithKeyHash((*daghash.Hash)(keyTxID))
}

// WithRandomKeyPN creates a GCSBuilder with a cryptographically random key and
// the passed probability and estimated filter size.
func WithRandomKeyPN(p uint8, n uint32) *GCSBuilder {
@ -306,7 +327,7 @@ func BuildBasicFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
// First we'll compute the bash of the transaction and add that
// directly to the filter.
txID := tx.TxID()
b.AddHash(&txID)
b.AddTxID(&txID)

// Skip the inputs for the coinbase transaction
if i != 0 {
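The wrappers added above (SetKeyFromTxID, AddTxID, WithKeyTxIDP, WithKeyTxID) all reduce to a pointer conversion, (*daghash.Hash)(txID). A standalone sketch with local stand-in types of why that conversion is valid: both named types have the identical underlying [32]byte, so a pointer to one can be reinterpreted as a pointer to the other without copying:

package main

import "fmt"

type Hash [32]byte
type TxID Hash // identical underlying type

// addHash is the "real" worker; addTxID is a thin wrapper in the same
// style as the GCSBuilder helpers added by this commit.
func addHash(h *Hash) { fmt.Printf("adding entry %x...\n", h[:4]) }

func addTxID(id *TxID) {
	// No copy is made: the pointer conversion is legal because Hash and
	// TxID share the same underlying array type.
	addHash((*Hash)(id))
}

func main() {
	id := &TxID{0xde, 0xad, 0xbe, 0xef}
	addTxID(id)
}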
@ -50,7 +50,7 @@ var (
// TestUseBlockHash tests using a block hash as a filter key.
func TestUseBlockHash(t *testing.T) {
// Block hash #448710, pretty high difficulty.
txID, err := daghash.NewHashFromStr(testHash)
txID, err := daghash.NewTxIDFromStr(testHash)
if err != nil {
t.Fatalf("Hash from string failed: %s", err.Error())
}
@ -73,7 +73,7 @@ func TestUseBlockHash(t *testing.T) {

// Create a GCSBuilder with a key hash and check that the key is derived
// correctly, then test it.
b := builder.WithKeyHash(txID)
b := builder.WithKeyTxID(txID)
key, err := b.Key()
if err != nil {
t.Fatalf("Builder instantiation with key hash failed: %s",
@ -87,13 +87,13 @@ func TestUseBlockHash(t *testing.T) {
BuilderTest(b, txID, builder.DefaultP, outPoint, addrBytes, t)

// Create a GCSBuilder with a key hash and non-default P and test it.
b = builder.WithKeyHashP(txID, 30)
b = builder.WithKeyTxIDP(txID, 30)
BuilderTest(b, txID, 30, outPoint, addrBytes, t)

// Create a GCSBuilder with a random key, set the key from a hash
// manually, check that the key is correct, and test it.
b = builder.WithRandomKey()
b.SetKeyFromHash(txID)
b.SetKeyFromTxID(txID)
key, err = b.Key()
if err != nil {
t.Fatalf("Builder instantiation with known key failed: %s",
@ -159,9 +159,9 @@ func TestUseBlockHash(t *testing.T) {

// Create a GCSBuilder with a known key and too-high P and ensure error
// works throughout all functions that use it.
b = builder.WithRandomKeyP(33).SetKeyFromHash(txID).SetKey(testKey)
b = builder.WithRandomKeyP(33).SetKeyFromTxID(txID).SetKey(testKey)
b.SetP(30).AddEntry(txID.CloneBytes()).AddEntries(contents)
b.AddOutPoint(outPoint).AddHash(txID).AddScript(addrBytes)
b.AddOutPoint(outPoint).AddTxID(txID).AddScript(addrBytes)
_, err = b.Key()
if err != gcs.ErrPTooBig {
t.Fatalf("No error on P too big!")
@ -172,7 +172,7 @@ func TestUseBlockHash(t *testing.T) {
}
}

func BuilderTest(b *builder.GCSBuilder, hash *daghash.Hash, p uint8,
func BuilderTest(b *builder.GCSBuilder, txID *daghash.TxID, p uint8,
outPoint wire.OutPoint, addrBytes []byte, t *testing.T) {

key, err := b.Key()
@ -206,13 +206,13 @@ func BuilderTest(b *builder.GCSBuilder, hash *daghash.Hash, p uint8,
builder.DefaultP)
}

// Add a hash, build a filter, and test matches
b.AddHash(hash)
// Add a txID, build a filter, and test matches
b.AddTxID(txID)
f, err = b.Build()
if err != nil {
t.Fatalf("Filter build failed: %s", err.Error())
}
match, err = f.Match(key, hash.CloneBytes())
match, err = f.Match(key, txID.CloneBytes())
if err != nil {
t.Fatalf("Filter match failed: %s", err)
}
@ -226,7 +226,7 @@ func BuilderTest(b *builder.GCSBuilder, hash *daghash.Hash, p uint8,
if err != nil {
t.Fatalf("Filter build failed: %s", err.Error())
}
match, err = f.Match(key, hash.CloneBytes())
match, err = f.Match(key, txID.CloneBytes())
if err != nil {
t.Fatalf("Filter match failed: %s", err)
}

@ -24,7 +24,7 @@ const TxIndexUnknown = -1
type Tx struct {
msgTx *wire.MsgTx // Underlying MsgTx
txHash *daghash.Hash // Cached transaction hash
txID *daghash.Hash // Cached transaction ID
txID *daghash.TxID // Cached transaction ID
txIndex int // Position within a block or TxIndexUnknown
}

@ -52,7 +52,7 @@ func (t *Tx) Hash() *daghash.Hash {
// ID returns the id of the transaction. This is equivalent to
// calling TxID on the underlying wire.MsgTx, however it caches the
// result so subsequent calls are more efficient.
func (t *Tx) ID() *daghash.Hash {
func (t *Tx) ID() *daghash.TxID {
// Return the cached hash if it has already been generated.
if t.txID != nil {
return t.txID
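util.Tx above lazily computes and caches the TxID the first time ID() is called. A small sketch of that memoization pattern with stand-in types; the hash function here is a plain SHA-256 placeholder, not the real encoding that kaspad hashes, and the cache is only safe because a transaction's ID is assumed never to change:

package main

import (
	"crypto/sha256"
	"fmt"
)

type TxID [32]byte

type Tx struct {
	payload []byte
	txID    *TxID // cached transaction ID, nil until first request
}

// ID computes the identifier on first use and serves the cached copy
// afterwards, mirroring the caching behavior shown in the hunk above.
func (t *Tx) ID() *TxID {
	if t.txID != nil {
		return t.txID
	}
	id := TxID(sha256.Sum256(t.payload)) // placeholder for the real ID computation
	t.txID = &id
	return t.txID
}

func main() {
	tx := &Tx{payload: []byte("example")}
	fmt.Printf("first call:  %x\n", tx.ID()[:8])
	fmt.Printf("second call: %x\n", tx.ID()[:8]) // served from cache
}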
@ -54,7 +54,7 @@ func TestTx(t *testing.T) {

// ID for block 100,000 transaction 1.
wantIDStr := "1742649144632997855e06650c1df5fd27cad915419a8f14f2f1b5a652257342"
wantID, err := daghash.NewHashFromStr(wantIDStr)
wantID, err := daghash.NewTxIDFromStr(wantIDStr)
// Request the ID multiple times to test generation and caching.
for i := 0; i < 2; i++ {
id := secondTx.ID()

@ -24,7 +24,7 @@ var genesisCoinbaseTx = MsgTx{
TxIn: []*TxIn{
{
PreviousOutPoint: OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{

@ -197,7 +197,7 @@ func BenchmarkReadOutPoint(b *testing.B) {
// transaction output point.
func BenchmarkWriteOutPoint(b *testing.B) {
op := &OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0,
}
for i := 0; i < b.N; i++ {

@ -551,7 +551,7 @@ var blockOne = MsgBlock{
TxIn: []*TxIn{
{
PreviousOutPoint: OutPoint{
TxID: daghash.Hash{},
TxID: daghash.TxID{},
Index: 0xffffffff,
},
SignatureScript: []byte{

@ -57,7 +57,7 @@ const (
defaultTxInOutAlloc = 15

// minTxInPayload is the minimum payload size for a transaction input.
// PreviousOutPoint.Hash + PreviousOutPoint.Index 4 bytes + Varint for
// PreviousOutPoint.TxID + PreviousOutPoint.Index 4 bytes + Varint for
// SignatureScript length 1 byte + Sequence 4 bytes.
minTxInPayload = 9 + daghash.HashSize

@ -181,22 +181,22 @@ var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems)
// OutPoint defines a bitcoin data type that is used to track previous
// transaction outputs.
type OutPoint struct {
TxID daghash.Hash
TxID daghash.TxID
Index uint32
}

// NewOutPoint returns a new bitcoin transaction outpoint point with the
// provided hash and index.
func NewOutPoint(hash *daghash.Hash, index uint32) *OutPoint {
func NewOutPoint(txID *daghash.TxID, index uint32) *OutPoint {
return &OutPoint{
TxID: *hash,
TxID: *txID,
Index: index,
}
}

// String returns the OutPoint in the human-readable form "hash:index".
// String returns the OutPoint in the human-readable form "txID:index".
func (o OutPoint) String() string {
// Allocate enough for hash string, colon, and 10 digits. Although
// Allocate enough for ID string, colon, and 10 digits. Although
// at the time of writing, the number of digits can be no greater than
// the length of the decimal representation of maxTxOutPerMessage, the
// maximum message payload may increase in the future and this
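With OutPoint.TxID now a daghash.TxID, constructing and printing an outpoint works as in the following standalone sketch. The types here are local stand-ins mirroring the shapes shown above, and the stand-in String ignores any byte-order reversal the real daghash types may apply when rendering hex:

package main

import (
	"fmt"
	"strconv"
)

type TxID [32]byte

func (id TxID) String() string { return fmt.Sprintf("%x", id[:]) }

// OutPoint mirrors the wire.OutPoint shape in the hunk above.
type OutPoint struct {
	TxID  TxID
	Index uint32
}

// String renders the human-readable "txID:index" form described above.
func (o OutPoint) String() string {
	return o.TxID.String() + ":" + strconv.FormatUint(uint64(o.Index), 10)
}

func main() {
	var id TxID
	id[31] = 0x2a
	op := OutPoint{TxID: id, Index: 1}
	fmt.Println(op) // 0000...2a:1
}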
@ -223,7 +223,7 @@ func (t *TxIn) SerializeSize() int {
}

func (t *TxIn) serializeSize(encodingFlags txEncoding) int {
// Outpoint Hash 32 bytes + Outpoint Index 4 bytes + Sequence 8 bytes +
// Outpoint ID 32 bytes + Outpoint Index 4 bytes + Sequence 8 bytes +
// serialized varint size for the length of SignatureScript +
// SignatureScript bytes.
return 44 + serializeSignatureScriptSize(t.SignatureScript, encodingFlags)
@ -301,7 +301,7 @@ func (msg *MsgTx) AddTxOut(to *TxOut) {
// is a special transaction created by miners that has no inputs. This is
// represented in the block dag by a transaction with a single input that has
// a previous output transaction index set to the maximum value along with a
// zero hash.
// zero TxID.
func (msg *MsgTx) IsCoinBase() bool {
// A coin base must only have one transaction input.
if len(msg.TxIn) != 1 {
@ -309,9 +309,9 @@ func (msg *MsgTx) IsCoinBase() bool {
}

// The previous output of a coinbase must have a max value index and
// a zero hash.
// a zero TxID.
prevOut := &msg.TxIn[0].PreviousOutPoint
return prevOut.Index == math.MaxUint32 && prevOut.TxID == daghash.Zero
return prevOut.Index == math.MaxUint32 && prevOut.TxID == daghash.ZeroTxID
}
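IsCoinBase above now compares against daghash.ZeroTxID instead of the zero hash. A standalone sketch of the same rule with local stand-ins; ZeroTxID here is simply the zero value of the array type, which is assumed to match what the real constant represents:

package main

import (
	"fmt"
	"math"
)

type TxID [32]byte

var ZeroTxID TxID // zero value: all 32 bytes are zero

type OutPoint struct {
	TxID  TxID
	Index uint32
}

type TxIn struct{ PreviousOutPoint OutPoint }
type MsgTx struct{ TxIn []*TxIn }

// IsCoinBase mirrors the rule in the hunk above: exactly one input whose
// previous outpoint has the maximum index and the zero transaction ID.
func (msg *MsgTx) IsCoinBase() bool {
	if len(msg.TxIn) != 1 {
		return false
	}
	prevOut := &msg.TxIn[0].PreviousOutPoint
	return prevOut.Index == math.MaxUint32 && prevOut.TxID == ZeroTxID
}

func main() {
	coinbase := &MsgTx{TxIn: []*TxIn{{PreviousOutPoint: OutPoint{TxID: ZeroTxID, Index: math.MaxUint32}}}}
	fmt.Println(coinbase.IsCoinBase()) // true
}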
||||
// TxHash generates the Hash for the transaction.
|
||||
@ -326,7 +326,7 @@ func (msg *MsgTx) TxHash() daghash.Hash {
|
||||
}
|
||||
|
||||
// TxID generates the Hash for the transaction without the signature script, gas and payload fields.
|
||||
func (msg *MsgTx) TxID() daghash.Hash {
|
||||
func (msg *MsgTx) TxID() daghash.TxID {
|
||||
// Encode the transaction, replace signature script, payload and gas with
|
||||
// zeroes, and calculate double sha256 on the result.
|
||||
// Ignore the error returns since the only way the encode could fail
|
||||
@ -338,7 +338,7 @@ func (msg *MsgTx) TxID() daghash.Hash {
|
||||
}
|
||||
buf := bytes.NewBuffer(make([]byte, 0, msg.serializeSize(encodingFlags)))
|
||||
_ = msg.serialize(buf, encodingFlags)
|
||||
return daghash.DoubleHashH(buf.Bytes())
|
||||
return daghash.TxID(daghash.DoubleHashH(buf.Bytes()))
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of a transaction so that the original does not get
|
||||
|
@ -22,11 +22,10 @@ import (
func TestTx(t *testing.T) {
	pver := ProtocolVersion

	// Block 100000 hash.
	hashStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
	hash, err := daghash.NewHashFromStr(hashStr)
	txIDStr := "3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
	txID, err := daghash.NewTxIDFromStr(txIDStr)
	if err != nil {
		t.Errorf("NewHashFromStr: %v", err)
		t.Errorf("NewTxIDFromStr: %v", err)
	}

	// Ensure the command is expected value.
@ -50,16 +49,16 @@ func TestTx(t *testing.T) {
	// NOTE: This is a block hash and made up index, but we're only
	// testing package functionality.
	prevOutIndex := uint32(1)
	prevOut := NewOutPoint(hash, prevOutIndex)
	if !prevOut.TxID.IsEqual(hash) {
		t.Errorf("NewOutPoint: wrong hash - got %v, want %v",
			spew.Sprint(&prevOut.TxID), spew.Sprint(hash))
	prevOut := NewOutPoint(txID, prevOutIndex)
	if !prevOut.TxID.IsEqual(txID) {
		t.Errorf("NewOutPoint: wrong ID - got %v, want %v",
			spew.Sprint(&prevOut.TxID), spew.Sprint(txID))
	}
	if prevOut.Index != prevOutIndex {
		t.Errorf("NewOutPoint: wrong index - got %v, want %v",
			prevOut.Index, prevOutIndex)
	}
	prevOutStr := fmt.Sprintf("%s:%d", hash.String(), prevOutIndex)
	prevOutStr := fmt.Sprintf("%s:%d", txID.String(), prevOutIndex)
	if s := prevOut.String(); s != prevOutStr {
		t.Errorf("OutPoint.String: unexpected result - got %v, "+
			"want %v", s, prevOutStr)
@ -130,8 +129,8 @@ func TestTx(t *testing.T) {

// TestTxHash tests the ability to generate the hash of a transaction accurately.
func TestTxHashAndID(t *testing.T) {
	hash1Str := "2d0dd1e05410fe76afbd90f577f615d603ca00b2fa53f963e6375ce742343faa"
	wantHash1, err := daghash.NewHashFromStr(hash1Str)
	txID1Str := "2d0dd1e05410fe76afbd90f577f615d603ca00b2fa53f963e6375ce742343faa"
	wantTxID1, err := daghash.NewTxIDFromStr(txID1Str)
	if err != nil {
		t.Errorf("NewHashFromStr: %v", err)
		return
@ -141,7 +140,7 @@ func TestTxHashAndID(t *testing.T) {
	tx1 := NewMsgTx(1)
	txIn := TxIn{
		PreviousOutPoint: OutPoint{
			TxID:  daghash.Hash{},
			TxID:  daghash.TxID{},
			Index: 0xffffffff,
		},
		SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
@ -169,16 +168,16 @@ func TestTxHashAndID(t *testing.T) {

	// Ensure the hash produced is expected.
	tx1Hash := tx1.TxHash()
	if !tx1Hash.IsEqual(wantHash1) {
	if !tx1Hash.IsEqual((*daghash.Hash)(wantTxID1)) {
		t.Errorf("TxHash: wrong hash - got %v, want %v",
			spew.Sprint(tx1Hash), spew.Sprint(wantHash1))
			spew.Sprint(tx1Hash), spew.Sprint(wantTxID1))
	}

	// Ensure the TxID for coinbase transaction is the same as TxHash.
	tx1ID := tx1.TxID()
	if !tx1ID.IsEqual(wantHash1) {
	if !tx1ID.IsEqual(wantTxID1) {
		t.Errorf("TxID: wrong ID - got %v, want %v",
			spew.Sprint(tx1ID), spew.Sprint(wantHash1))
			spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
	}

	hash2Str := "ef55c85be28615b699bef1470d0d041982a6f3af5f900c978c3837b967b168b3"
@ -189,7 +188,7 @@ func TestTxHashAndID(t *testing.T) {
	}

	id2Str := "12063f97b5fbbf441bd7962f88631a36a4b4a67649045c02ed840bedc97e88ea"
	wantID2, err := daghash.NewHashFromStr(id2Str)
	wantID2, err := daghash.NewTxIDFromStr(id2Str)
	if err != nil {
		t.Errorf("NewHashFromStr: %v", err)
		return
@ -200,7 +199,7 @@ func TestTxHashAndID(t *testing.T) {
		{
			PreviousOutPoint: OutPoint{
				Index: 0,
				TxID:  daghash.Hash{1, 2, 3},
				TxID:  daghash.TxID{1, 2, 3},
			},
			SignatureScript: []byte{
				0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xDA, 0x0D, 0xC6, 0xAE, 0xCE, 0xFE, 0x1E, 0x06, 0xEF, 0xDF,
@ -251,14 +250,14 @@ func TestTxHashAndID(t *testing.T) {
			spew.Sprint(tx2ID), spew.Sprint(wantID2))
	}

	if tx2ID.IsEqual(&tx2Hash) {
	if tx2ID.IsEqual((*daghash.TxID)(&tx2Hash)) {
		t.Errorf("tx2ID and tx2Hash shouldn't be the same for non-coinbase transaction with signature and/or payload")
	}

	tx2.Payload = []byte{}
	tx2.TxIn[0].SignatureScript = []byte{}
	newTx2Hash := tx2.TxHash()
	if !tx2ID.IsEqual(&newTx2Hash) {
	if !tx2ID.IsEqual((*daghash.TxID)(&newTx2Hash)) {
		t.Errorf("tx2ID and newTx2Hash should be the same for transaction without empty signature and payload")
	}
}
@ -951,7 +950,7 @@ var multiTx = &MsgTx{
	TxIn: []*TxIn{
		{
			PreviousOutPoint: OutPoint{
				TxID:  daghash.Hash{},
				TxID:  daghash.TxID{},
				Index: 0xffffffff,
			},
			SignatureScript: []byte{