[NOD-816] Remove TxIndex and AddrIndex (#653)

* [NOD-816] Remove TxIndex.

* [NOD-816] Remove AddrIndex.

* [NOD-816] Remove mentions of TxIndex and AddrIndex.

* [NOD-816] Remove mentions of getrawtransaction.

* [NOD-816] Remove mentions of searchrawtransaction.

* [NOD-816] Remove cmd/addsubnetwork.

* [NOD-816] Fix a comment.

* [NOD-816] Fix a comment.

* [NOD-816] Implement BlockDAG.TxConfirmations.

* [NOD-816] Return confirmations in getTxOut.

* [NOD-816] Rename TxConfirmations to UTXOConfirmations.

* [NOD-816] Rename txConfirmations to utxoConfirmations.

* [NOD-816] Fix capitalization in variable names.

* [NOD-816] Add acceptance index to addblock.

* [NOD-816] Get rid of txrawresult-confirmations.

* [NOD-816] Fix config flag.
stasatdaglabs 2020-03-10 16:09:31 +02:00 committed by GitHub
parent b8a00f7519
commit 3d8dd8724d
25 changed files with 144 additions and 3138 deletions


@@ -1376,6 +1376,23 @@ func (dag *BlockDAG) BlockConfirmationsByHashNoLock(hash *daghash.Hash) (uint64,
return dag.blockConfirmations(node)
}
// UTXOConfirmations returns the confirmations for the given outpoint, if it exists
// in the DAG's UTXO set.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) UTXOConfirmations(outpoint *wire.Outpoint) (uint64, bool) {
dag.dagLock.RLock()
defer dag.dagLock.RUnlock()
utxoEntry, ok := dag.GetUTXOEntry(*outpoint)
if !ok {
return 0, false
}
confirmations := dag.SelectedTipBlueScore() - utxoEntry.BlockBlueScore() + 1
return confirmations, true
}
// UTXOCommitment returns a commitment to the dag's current UTXOSet
func (dag *BlockDAG) UTXOCommitment() string {
return dag.UTXOSet().UTXOMultiset.Hash().String()
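Confirmations here are defined in terms of blue score: if the selected tip's blue score is 100 and the UTXO entry was added at blue score 98, UTXOConfirmations returns 3. A minimal usage sketch, assuming an initialized *blockdag.BlockDAG and the github.com/pkg/errors package in scope (the helper name is hypothetical):

// confirmationsForOutpoint shows how a caller such as the getTxOut RPC
// handler might consume the new method.
func confirmationsForOutpoint(dag *blockdag.BlockDAG, outpoint *wire.Outpoint) (uint64, error) {
	confirmations, ok := dag.UTXOConfirmations(outpoint)
	if !ok {
		return 0, errors.Errorf("outpoint %v is not in the UTXO set", outpoint)
	}
	return confirmations, nil
}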


@@ -1,902 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/pkg/errors"
"sync"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
const (
// addrIndexName is the human-readable name for the index.
addrIndexName = "address index"
// level0MaxEntries is the maximum number of transactions that are
// stored in level 0 of an address index entry. Subsequent levels store
// 2^n * level0MaxEntries entries, in other words, double the maximum of
// the previous level.
level0MaxEntries = 8
// addrKeySize is the number of bytes an address key consumes in the
// index. It consists of 1 byte address type + 20 bytes hash160.
addrKeySize = 1 + 20
// levelKeySize is the number of bytes a level key in the address index
// consumes. It consists of the address key + 1 byte for the level.
levelKeySize = addrKeySize + 1
// levelOffset is the offset in the level key which identifies the level.
levelOffset = levelKeySize - 1
// addrKeyTypePubKeyHash is the address type in an address key which
// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
// This is done because both are identical for the purposes of the
// address index.
addrKeyTypePubKeyHash = 0
// addrKeyTypeScriptHash is the address type in an address key which
// represents a pay-to-script-hash address. This is necessary because
// the hash of a pubkey address might be the same as that of a script
// hash.
addrKeyTypeScriptHash = 1
// Size of a transaction entry. It consists of 8 bytes block id + 4
// bytes offset + 4 bytes length.
txEntrySize = 8 + 4 + 4
)
var (
// addrIndexKey is the key of the address index and the db bucket used
// to house it.
addrIndexKey = []byte("txbyaddridx")
// errUnsupportedAddressType is an error that is used to signal an
// unsupported address type has been used.
errUnsupportedAddressType = errors.New("address type is not supported " +
"by the address index")
)
// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockDAG to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockDAG. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index,
// since it is needed to catch up old blocks whose spent outputs have already
// been pruned from the UTXO set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * level0MaxEntries.
//
// New transactions are appended to level 0 until it becomes full at which point
// the entire level 0 entry is appended to the level 1 entry and level 0 is
// cleared. This process continues until level 1 becomes full at which point it
// will be appended to level 2 and cleared and so on.
//
// The result of this is the lower levels contain newer transactions and the
// transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
// <addr type><addr hash><level>
//
// Field Type Size
// addr type uint8 1 byte
// addr hash hash160 20 bytes
// level uint8 1 byte
// -----
// Total: 22 bytes
//
// The serialized value format is:
//
// [<block id><start offset><tx length>,...]
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------
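To make the level arithmetic above concrete, here is a small self-contained simulation of the merge rules (an illustrative model only, not part of the index code): with level0MaxEntries = 8, 25 sequential inserts leave the levels holding 1, 8, and 16 entries, which satisfies the empty/half-full/full invariant relied on below.

package main

import "fmt"

// levelOccupancy simulates the merge rules of dbPutAddrIndexEntry and
// returns the number of entries each level holds after n sequential
// inserts.
func levelOccupancy(n int) []int {
	const level0Max = 8 // mirrors level0MaxEntries
	maxForLevel := func(level int) int { return level0Max << uint(level) }
	counts := []int{0}
	for i := 0; i < n; i++ {
		if counts[0] < level0Max {
			counts[0]++ // level 0 has room: plain append
			continue
		}
		// Walk upward past full levels to the first level with room.
		level := 1
		for {
			if level == len(counts) {
				counts = append(counts, 0)
			}
			if counts[level] != maxForLevel(level) {
				break
			}
			level++
		}
		// Merge the previous (full) level into it, shift the lower
		// levels up by one, and restart level 0 with the new entry.
		counts[level] += counts[level-1]
		for m := level - 1; m > 0; m-- {
			counts[m] = counts[m-1]
		}
		counts[0] = 1
	}
	return counts
}

func main() {
	fmt.Println(levelOccupancy(25)) // prints [1 8 16]
}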
// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*daghash.Hash, error)
// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint64, txLoc wire.TxLoc) []byte {
// Serialize the entry.
serialized := make([]byte, 16)
byteOrder.PutUint64(serialized, blockID)
byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxStart))
byteOrder.PutUint32(serialized[12:], uint32(txLoc.TxLen))
return serialized
}
// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error {
// Ensure there are enough bytes to decode.
if len(serialized) < txEntrySize {
return errDeserialize("unexpected end of data")
}
hash, err := fetchBlockHash(serialized[0:8])
if err != nil {
return err
}
region.Hash = hash
region.Offset = byteOrder.Uint32(serialized[8:12])
region.Len = byteOrder.Uint32(serialized[12:16])
return nil
}
// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
var key [levelKeySize]byte
copy(key[:], addrKey[:])
key[levelOffset] = level
return key
}
// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint64, txLoc wire.TxLoc) error {
// Start with level 0 and its initial max number of entries.
curLevel := uint8(0)
maxLevelBytes := level0MaxEntries * txEntrySize
// Simply append the new entry to level 0 and return now when it will
// fit. This is the most common path.
newData := serializeAddrIndexEntry(blockID, txLoc)
level0Key := keyForLevel(addrKey, 0)
level0Data := bucket.Get(level0Key[:])
if len(level0Data)+len(newData) <= maxLevelBytes {
mergedData := newData
if len(level0Data) > 0 {
mergedData = make([]byte, len(level0Data)+len(newData))
copy(mergedData, level0Data)
copy(mergedData[len(level0Data):], newData)
}
return bucket.Put(level0Key[:], mergedData)
}
// At this point, level 0 is full, so merge each level into higher
// levels as many times as needed to free up level 0.
prevLevelData := level0Data
for {
// Each new level holds twice as much as the previous one.
curLevel++
maxLevelBytes *= 2
// Move to the next level as long as the current level is full.
curLevelKey := keyForLevel(addrKey, curLevel)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == maxLevelBytes {
prevLevelData = curLevelData
continue
}
// The current level has room for the data in the previous one,
// so merge the data from the previous level into it.
mergedData := prevLevelData
if len(curLevelData) > 0 {
mergedData = make([]byte, len(curLevelData)+
len(prevLevelData))
copy(mergedData, curLevelData)
copy(mergedData[len(curLevelData):], prevLevelData)
}
err := bucket.Put(curLevelKey[:], mergedData)
if err != nil {
return err
}
// Move all of the levels before the previous one up a level.
for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
mergeLevelKey := keyForLevel(addrKey, mergeLevel)
prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
prevData := bucket.Get(prevLevelKey[:])
err := bucket.Put(mergeLevelKey[:], prevData)
if err != nil {
return err
}
}
break
}
// Finally, insert the new entry into level 0 now that it is empty.
return bucket.Put(level0Key[:], newData)
}
// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped, since it could
// be less in the case where there are fewer total entries than the
// requested number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]database.BlockRegion, uint32, error) {
// When the reverse flag is not set, all levels need to be fetched
// because numToSkip and numRequested are counted from the oldest
// transactions (highest level) and thus the total count is needed.
// However, when the reverse flag is set, only enough records to satisfy
// the requested amount are needed.
var level uint8
var serialized []byte
for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if levelData == nil {
// Stop when there are no more levels.
break
}
// Higher levels contain older transactions, so prepend them.
prepended := make([]byte, len(serialized)+len(levelData))
copy(prepended, levelData)
copy(prepended[len(levelData):], serialized)
serialized = prepended
level++
}
// When the requested number of entries to skip is larger than the
// number available, skip them all and return now with the actual number
// skipped.
numEntries := uint32(len(serialized) / txEntrySize)
if numToSkip >= numEntries {
return nil, numEntries, nil
}
// Nothing more to do when there are no requested entries.
if numRequested == 0 {
return nil, numToSkip, nil
}
// Limit the number to load based on the number of available entries,
// the number to skip, and the number requested.
numToLoad := numEntries - numToSkip
if numToLoad > numRequested {
numToLoad = numRequested
}
// Start the offset after all skipped entries and load the calculated
// number.
results := make([]database.BlockRegion, numToLoad)
for i := uint32(0); i < numToLoad; i++ {
// Calculate the read offset according to the reverse flag.
var offset uint32
if reverse {
offset = (numEntries - numToSkip - i - 1) * txEntrySize
} else {
offset = (numToSkip + i) * txEntrySize
}
// Deserialize and populate the result.
err := deserializeAddrIndexEntry(serialized[offset:],
&results[i], fetchBlockHash)
if err != nil {
// Ensure any deserialization errors are returned as
// database corruption errors.
if isDeserializeErr(err) {
err = database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("failed to "+
"deserialized address index "+
"for key %x: %s", addrKey, err),
}
}
return nil, 0, err
}
}
return results, numToSkip, nil
}
// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
maxEntriesForLevel := level0MaxEntries
minRequired := 1
for l := uint8(1); l <= level; l++ {
minRequired += maxEntriesForLevel
maxEntriesForLevel *= 2
}
return minRequired
}
// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
numEntries := level0MaxEntries
for l := level; l > 0; l-- {
numEntries *= 2
}
return numEntries
}
// dbRemoveAddrIndexEntries removes the specified number of entries from
// the address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
// Nothing to do if no entries are being deleted.
if count <= 0 {
return nil
}
// Make use of a local map to track pending updates and define a closure
// to apply it to the database. This is done in order to reduce the
// number of database reads and because there is more than one exit
// path that needs to apply the updates.
pendingUpdates := make(map[uint8][]byte)
applyPending := func() error {
for level, data := range pendingUpdates {
curLevelKey := keyForLevel(addrKey, level)
if len(data) == 0 {
err := bucket.Delete(curLevelKey[:])
if err != nil {
return err
}
continue
}
err := bucket.Put(curLevelKey[:], data)
if err != nil {
return err
}
}
return nil
}
// Loop forwards through the levels while removing entries until the
// specified number has been removed. This will potentially result in
// entirely empty lower levels which will be backfilled below.
var highestLoadedLevel uint8
numRemaining := count
for level := uint8(0); numRemaining > 0; level++ {
// Load the data for the level from the database.
curLevelKey := keyForLevel(addrKey, level)
curLevelData := bucket.Get(curLevelKey[:])
if len(curLevelData) == 0 && numRemaining > 0 {
return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
"not enough entries for address key %x to "+
"delete %d entries", addrKey, count))
}
pendingUpdates[level] = curLevelData
highestLoadedLevel = level
// Delete the entire level as needed.
numEntries := len(curLevelData) / txEntrySize
if numRemaining >= numEntries {
pendingUpdates[level] = nil
numRemaining -= numEntries
continue
}
// Remove remaining entries to delete from the level.
offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
pendingUpdates[level] = curLevelData[:offsetEnd]
break
}
// When not all of the elements in level 0 were removed, there is nothing
// left to do other than updating the database.
if len(pendingUpdates[0]) != 0 {
return applyPending()
}
// At this point there are one or more empty levels before the current
// level which need to be backfilled and the current level might have
// had some entries deleted from it as well. Since all levels after
// level 0 are required to either be empty, half full, or completely
// full, the current level must be adjusted accordingly by backfilling
// each previous level in a way which satisfies the requirements. Any
// entries that are left are assigned to level 0 after the loop as they
// are guaranteed to fit by the logic in the loop. In other words, this
// effectively squashes all remaining entries in the current level into
// the lowest possible levels while following the level rules.
//
// Note that the level after the current level might also have entries
// and gaps are not allowed, so this also keeps track of the lowest
// empty level so the code below knows how far to backfill in case it is
// required.
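//
// For example (with level0MaxEntries = 8): deleting 9 entries from levels
// holding (1, 8, 16) entries removes all of levels 0 and 1, and the
// backfill below then pulls the 16 remaining entries out of level 2,
// leaving levels 0 and 1 holding 8 entries each.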
lowestEmptyLevel := uint8(255)
curLevelData := pendingUpdates[highestLoadedLevel]
curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
for level := highestLoadedLevel; level > 0; level-- {
// When there are not enough entries left in the current level
// for the number that would be required to reach it, clear the
// current level, which effectively moves them all up to the
// previous level on the next iteration. Otherwise, there are
// sufficient entries, so update the current level to
// contain as many entries as possible while still leaving
// enough remaining entries required to reach the level.
numEntries := len(curLevelData) / txEntrySize
prevLevelMaxEntries := curLevelMaxEntries / 2
minPrevRequired := minEntriesToReachLevel(level - 1)
if numEntries < prevLevelMaxEntries+minPrevRequired {
lowestEmptyLevel = level
pendingUpdates[level] = nil
} else {
// This level can only be completely full or half full,
// so choose the appropriate offset to ensure enough
// entries remain to reach the level.
var offset int
if numEntries-curLevelMaxEntries >= minPrevRequired {
offset = curLevelMaxEntries * txEntrySize
} else {
offset = prevLevelMaxEntries * txEntrySize
}
pendingUpdates[level] = curLevelData[:offset]
curLevelData = curLevelData[offset:]
}
curLevelMaxEntries = prevLevelMaxEntries
}
pendingUpdates[0] = curLevelData
if len(curLevelData) == 0 {
lowestEmptyLevel = 0
}
// When the highest loaded level is empty, it's possible the level after
// it still has data and thus that data needs to be backfilled as well.
for len(pendingUpdates[highestLoadedLevel]) == 0 {
// When the next level is empty too, there is no data left to
// continue backfilling, so there is nothing left to do.
// Otherwise, populate the pending updates map with the newly
// loaded data and update the highest loaded level accordingly.
level := highestLoadedLevel + 1
curLevelKey := keyForLevel(addrKey, level)
levelData := bucket.Get(curLevelKey[:])
if len(levelData) == 0 {
break
}
pendingUpdates[level] = levelData
highestLoadedLevel = level
// At this point the highest level is not empty, but it might
// be half full. When that is the case, move it up a level to
// simplify the code below which backfills all lower levels that
// are still empty. This also means the current level will be
// empty, so the loop will perform another iteration to
// potentially backfill this level with data from the next one.
curLevelMaxEntries := maxEntriesForLevel(level)
if len(levelData)/txEntrySize != curLevelMaxEntries {
pendingUpdates[level] = nil
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// Backfill all lower levels that are still empty by iteratively
// halving the data until the lowest empty level is filled.
for level > lowestEmptyLevel {
offset := (curLevelMaxEntries / 2) * txEntrySize
pendingUpdates[level] = levelData[:offset]
levelData = levelData[offset:]
pendingUpdates[level-1] = levelData
level--
curLevelMaxEntries /= 2
}
// The lowest possible empty level is now the highest loaded
// level.
lowestEmptyLevel = highestLoadedLevel
}
// Apply the pending updates.
return applyPending()
}
// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr util.Address) ([addrKeySize]byte, error) {
switch addr := addr.(type) {
case *util.AddressPubKeyHash:
var result [addrKeySize]byte
result[0] = addrKeyTypePubKeyHash
copy(result[1:], addr.Hash160()[:])
return result, nil
case *util.AddressScriptHash:
var result [addrKeySize]byte
result[0] = addrKeyTypeScriptHash
copy(result[1:], addr.Hash160()[:])
return result, nil
}
return [addrKeySize]byte{}, errUnsupportedAddressType
}
// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockDAG. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
// The following fields are set when the instance is created and can't
// be changed afterwards, so there is no need to protect them with a
// separate mutex.
db database.DB
dagParams *dagconfig.Params
// The following fields are used to quickly link transactions and
// addresses that have not been included into a block yet when an
// address index is being maintained. They are protected by the
// unconfirmedLock field.
//
// The txnsByAddr field is used to keep an index of all transactions
// which either create an output to a given address or spend from a
// previous output to it keyed by the address.
//
// The addrsByTx field is essentially the reverse and is used to
// keep an index of all addresses which a given transaction involves.
// This allows fairly efficient updates when transactions are removed
// once they are included into a block.
unconfirmedLock sync.RWMutex
txnsByAddr map[[addrKeySize]byte]map[daghash.TxID]*util.Tx
addrsByTx map[daghash.TxID]map[[addrKeySize]byte]struct{}
}
// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)
// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)
// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
return true
}
// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
idx.db = db
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
return addrIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
return addrIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
return err
}
// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can be
// stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
// indexScriptPubKey extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexScriptPubKey(data writeIndexData, scriptPubKey []byte, txIdx int) {
// Nothing to index if the script is non-standard or otherwise doesn't
// contain any addresses.
_, addr, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
if err != nil || addr == nil {
return
}
addrKey, err := addrToKey(addr)
if err != nil {
// Ignore unsupported address types.
return
}
// Avoid inserting the transaction more than once. Since the
// transactions are indexed serially any duplicates will be
// indexed in a row, so checking the most recent entry for the
// address is enough to detect duplicates.
indexedTxns := data[addrKey]
numTxns := len(indexedTxns)
if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
return
}
indexedTxns = append(indexedTxns, txIdx)
data[addrKey] = indexedTxns
}
// indexBlock extracts all of the standard addresses from all of the transactions
// in the passed block and maps each of them to the associated transaction using
// the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *util.Block, dag *blockdag.BlockDAG) {
for txIdx, tx := range block.Transactions() {
// Coinbases do not reference any inputs. Since the block is
// required to have already gone through full validation, it has
// already been proven that the first transaction in the block is
// a coinbase.
if txIdx > util.CoinbaseTransactionIndex {
for _, txIn := range tx.MsgTx().TxIn {
// The UTXO should always have the input since
// the index contract requires it, however, be
// safe and simply ignore any missing entries.
entry, ok := dag.GetUTXOEntry(txIn.PreviousOutpoint)
if !ok {
continue
}
idx.indexScriptPubKey(data, entry.ScriptPubKey(), txIdx)
}
}
for _, txOut := range tx.MsgTx().TxOut {
idx.indexScriptPubKey(data, txOut.ScriptPubKey, txIdx)
}
}
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
_ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// Build all of the address to transaction mappings in a local map.
addrsToTxns := make(writeIndexData)
idx.indexBlock(addrsToTxns, block, dag)
// Add all of the index entries for each address.
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
for addrKey, txIdxs := range addrsToTxns {
for _, txIdx := range txIdxs {
err := dbPutAddrIndexEntry(addrIdxBucket, addrKey,
blockID, txLocs[txIdx])
if err != nil {
return err
}
}
}
return nil
}
// TxRegionsForAddress returns a slice of block regions which identify each
// transaction that involves the passed address according to the specified
// number to skip, number requested, and whether or not the results should be
// reversed. It also returns the number actually skipped since it could be less
// in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) TxRegionsForAddress(dbTx database.Tx, addr util.Address, numToSkip, numRequested uint32, reverse bool) ([]database.BlockRegion, uint32, error) {
addrKey, err := addrToKey(addr)
if err != nil {
return nil, 0, err
}
var regions []database.BlockRegion
var skipped uint32
err = idx.db.View(func(dbTx database.Tx) error {
// Create closure to lookup the block hash given the ID using
// the database transaction.
fetchBlockHash := func(id []byte) (*daghash.Hash, error) {
// Deserialize and populate the result.
return blockdag.DBFetchBlockHashBySerializedID(dbTx, id)
}
var err error
addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
regions, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
addrKey, numToSkip, numRequested, reverse,
fetchBlockHash)
return err
})
return regions, skipped, err
}
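A hedged usage sketch for paging newest-first (addrIndex, addr, and log are assumed to exist in the caller's scope; note that the method's dbTx parameter is shadowed by the internal View transaction, so nil suffices here):

// Fetch block regions for the 20 most recent confirmed transactions
// involving addr, newest first.
regions, skipped, err := addrIndex.TxRegionsForAddress(nil, addr, 0, 20, true)
if err != nil {
	return err
}
log.Infof("skipped %d entries, fetched %d block regions", skipped, len(regions))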
// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptPubKey []byte, tx *util.Tx) {
// The error is ignored here since the only reason it can fail is if the
// script fails to parse and it was already validated before being
// admitted to the mempool.
_, addr, _ := txscript.ExtractScriptPubKeyAddress(scriptPubKey,
idx.dagParams)
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return
}
// Add a mapping from the address to the transaction.
idx.unconfirmedLock.Lock()
defer idx.unconfirmedLock.Unlock()
addrIndexEntry := idx.txnsByAddr[addrKey]
if addrIndexEntry == nil {
addrIndexEntry = make(map[daghash.TxID]*util.Tx)
idx.txnsByAddr[addrKey] = addrIndexEntry
}
addrIndexEntry[*tx.ID()] = tx
// Add a mapping from the transaction to the address.
addrsByTxEntry := idx.addrsByTx[*tx.ID()]
if addrsByTxEntry == nil {
addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
idx.addrsByTx[*tx.ID()] = addrsByTxEntry
}
addrsByTxEntry[addrKey] = struct{}{}
}
// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *util.Tx, utxoSet blockdag.UTXOSet) {
// Index addresses of all referenced previous transaction outputs.
//
// The existence checks are elided since this is only called after the
// transaction has already been validated and thus all inputs are
// already known to exist.
for _, txIn := range tx.MsgTx().TxIn {
entry, ok := utxoSet.Get(txIn.PreviousOutpoint)
if !ok {
// Ignore missing entries. This should never happen
// in practice since the function comments specifically
// call out all inputs must be available.
continue
}
idx.indexUnconfirmedAddresses(entry.ScriptPubKey(), tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
idx.indexUnconfirmedAddresses(txOut.ScriptPubKey, tx)
}
}
// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(txID *daghash.TxID) {
idx.unconfirmedLock.Lock()
defer idx.unconfirmedLock.Unlock()
// Remove all address references to the transaction from the address
// index and remove the entry for the address altogether if it no longer
// references any transactions.
for addrKey := range idx.addrsByTx[*txID] {
delete(idx.txnsByAddr[addrKey], *txID)
if len(idx.txnsByAddr[addrKey]) == 0 {
delete(idx.txnsByAddr, addrKey)
}
}
// Remove the entry from the transaction to address lookup map as well.
delete(idx.addrsByTx, *txID)
}
// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and will result in no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr util.Address) []*util.Tx {
// Ignore unsupported address types.
addrKey, err := addrToKey(addr)
if err != nil {
return nil
}
// Protect concurrent access.
idx.unconfirmedLock.RLock()
defer idx.unconfirmedLock.RUnlock()
// Return a new slice with the results if there are any. This ensures
// safe concurrency.
if txns, exists := idx.txnsByAddr[addrKey]; exists {
addressTxns := make([]*util.Tx, 0, len(txns))
for _, tx := range txns {
addressTxns = append(addressTxns, tx)
}
return addressTxns
}
return nil
}
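A minimal lifecycle sketch for the unconfirmed index (tx, utxoSet, addr, and log are assumed from the surrounding code; per AddUnconfirmedTx's contract, the transaction must already have been validated by the mempool):

idx := NewAddrIndex(&dagconfig.SimnetParams)
// Index a validated transaction, query it by address, then drop it once
// it has been mined into a block.
idx.AddUnconfirmedTx(tx, utxoSet)
for _, unconfirmedTx := range idx.UnconfirmedTxnsForAddress(addr) {
	log.Infof("unconfirmed transaction %s involves %s", unconfirmedTx.ID(), addr)
}
idx.RemoveUnconfirmedTx(tx.ID())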
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("addrindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the addrindex with --dropaddrindex", lastKnownBlockID-currentBlockID)
}
// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockDAG to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockDAG package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewAddrIndex(dagParams *dagconfig.Params) *AddrIndex {
return &AddrIndex{
dagParams: dagParams,
txnsByAddr: make(map[[addrKeySize]byte]map[daghash.TxID]*util.Tx),
addrsByTx: make(map[daghash.TxID]map[[addrKeySize]byte]struct{}),
}
}
// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
return dropIndex(db, addrIndexKey, addrIndexName, interrupt)
}


@@ -1,277 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"testing"
"github.com/kaspanet/kaspad/wire"
)
// addrIndexBucket provides a mock address index database bucket by implementing
// the internalBucket interface.
type addrIndexBucket struct {
levels map[[levelKeySize]byte][]byte
}
// Clone returns a deep copy of the mock address index bucket.
func (b *addrIndexBucket) Clone() *addrIndexBucket {
levels := make(map[[levelKeySize]byte][]byte)
for k, v := range b.levels {
vCopy := make([]byte, len(v))
copy(vCopy, v)
levels[k] = vCopy
}
return &addrIndexBucket{levels: levels}
}
// Get returns the value associated with the key from the mock address index
// bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Get(key []byte) []byte {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
return b.levels[levelKey]
}
// Put stores the provided key/value pair to the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Put(key []byte, value []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
b.levels[levelKey] = value
return nil
}
// Delete removes the provided key from the mock address index bucket.
//
// This is part of the internalBucket interface.
func (b *addrIndexBucket) Delete(key []byte) error {
var levelKey [levelKeySize]byte
copy(levelKey[:], key)
delete(b.levels, levelKey)
return nil
}
// printLevels returns a string with a visual representation of the provided
// address key taking into account the max size of each level. It is useful
// when creating and debugging test cases.
func (b *addrIndexBucket) printLevels(addrKey [addrKeySize]byte) string {
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
var levelBuf bytes.Buffer
_, _ = levelBuf.WriteString("\n")
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
_, _ = levelBuf.WriteString(fmt.Sprintf("%02d ", num))
}
for i := numEntries; i < maxEntries; i++ {
_, _ = levelBuf.WriteString("_ ")
}
_, _ = levelBuf.WriteString("\n")
maxEntries *= 2
}
return levelBuf.String()
}
// sanityCheck ensures that all data stored in the bucket for the given address
// adheres to the level-based rules described by the address index
// documentation.
func (b *addrIndexBucket) sanityCheck(addrKey [addrKeySize]byte, expectedTotal int) error {
// Find the highest level for the key.
highestLevel := uint8(0)
for k := range b.levels {
if !bytes.Equal(k[:levelOffset], addrKey[:]) {
continue
}
level := uint8(k[levelOffset])
if level > highestLevel {
highestLevel = level
}
}
// Ensure the expected total number of entries are present and that
// all levels adhere to the rules described in the address index
// documentation.
var totalEntries int
maxEntries := level0MaxEntries
for level := uint8(0); level <= highestLevel; level++ {
// Level 0 can't have more entries than the max allowed, and it
// can't be empty if the levels after it have data. All other
// levels must either be half full or full.
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
totalEntries += numEntries
if level == 0 {
if (highestLevel != 0 && numEntries == 0) ||
numEntries > maxEntries {
return errors.Errorf("level %d has %d entries",
level, numEntries)
}
} else if numEntries != maxEntries && numEntries != maxEntries/2 {
return errors.Errorf("level %d has %d entries", level,
numEntries)
}
maxEntries *= 2
}
if totalEntries != expectedTotal {
return errors.Errorf("expected %d entries - got %d", expectedTotal,
totalEntries)
}
// Ensure all of the numbers are in order starting from the highest
// level moving to the lowest level.
expectedNum := uint32(0)
for level := highestLevel + 1; level > 0; level-- {
data := b.levels[keyForLevel(addrKey, level)]
numEntries := len(data) / txEntrySize
for i := 0; i < numEntries; i++ {
start := i * txEntrySize
num := byteOrder.Uint32(data[start:])
if num != expectedNum {
return errors.Errorf("level %d offset %d does "+
"not contain the expected number of "+
"%d - got %d", level, i, num,
expectedNum)
}
expectedNum++
}
}
return nil
}
// TestAddrIndexLevels ensures that adding and deleting entries to the address
// index creates multiple levels as described by the address index
// documentation.
func TestAddrIndexLevels(t *testing.T) {
t.Parallel()
tests := []struct {
name string
key [addrKeySize]byte
numInsert int
printLevels bool // Set to help debug a specific test.
}{
{
name: "level 0 not full",
numInsert: level0MaxEntries - 1,
},
{
name: "level 1 half",
numInsert: level0MaxEntries + 1,
},
{
name: "level 1 full",
numInsert: level0MaxEntries*2 + 1,
},
{
name: "level 2 half, level 1 half",
numInsert: level0MaxEntries*3 + 1,
},
{
name: "level 2 half, level 1 full",
numInsert: level0MaxEntries*4 + 1,
},
{
name: "level 2 full, level 1 half",
numInsert: level0MaxEntries*5 + 1,
},
{
name: "level 2 full, level 1 full",
numInsert: level0MaxEntries*6 + 1,
},
{
name: "level 3 half, level 2 half, level 1 half",
numInsert: level0MaxEntries*7 + 1,
},
{
name: "level 3 full, level 2 half, level 1 full",
numInsert: level0MaxEntries*12 + 1,
},
}
nextTest:
for testNum, test := range tests {
// Insert entries in order.
populatedBucket := &addrIndexBucket{
levels: make(map[[levelKeySize]byte][]byte),
}
for i := 0; i < test.numInsert; i++ {
txLoc := wire.TxLoc{TxStart: i * 2}
err := dbPutAddrIndexEntry(populatedBucket, test.key,
uint64(i), txLoc)
if err != nil {
t.Errorf("dbPutAddrIndexEntry #%d (%s) - "+
"unexpected error: %v", testNum,
test.name, err)
continue nextTest
}
}
if test.printLevels {
t.Log(populatedBucket.printLevels(test.key))
}
// Delete entries from the populated bucket until all entries
// have been deleted. The bucket is reset to the fully
// populated bucket on each iteration so every combination is
// tested. Notice the upper limit purposely exceeds the number
// of entries to ensure attempting to delete more entries than
// there are works correctly.
for numDelete := 0; numDelete <= test.numInsert+1; numDelete++ {
// Clone populated bucket to run each delete against.
bucket := populatedBucket.Clone()
// Remove the number of entries for this iteration.
err := dbRemoveAddrIndexEntries(bucket, test.key,
numDelete)
if err != nil {
if numDelete <= test.numInsert {
t.Errorf("dbRemoveAddrIndexEntries (%s) "+
" delete %d - unexpected error: "+
"%v", test.name, numDelete, err)
continue nextTest
}
}
if test.printLevels {
t.Log(bucket.printLevels(test.key))
}
// Sanity check the levels to ensure they adhere to all
// rules.
numExpected := test.numInsert
if numDelete <= test.numInsert {
numExpected -= numDelete
}
err = bucket.sanityCheck(test.key, numExpected)
if err != nil {
t.Errorf("sanity check fail (%s) delete %d: %v",
test.name, numDelete, err)
continue nextTest
}
}
}
}


@@ -1,400 +0,0 @@
// Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import (
"fmt"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
)
const (
// txIndexName is the human-readable name for the index.
txIndexName = "transaction index"
includingBlocksIndexKeyEntrySize = 8 // 4 bytes for offset + 4 bytes for transaction length
)
var (
includingBlocksIndexKey = []byte("includingblocksidx")
acceptingBlocksIndexKey = []byte("acceptingblocksidx")
)
// txsAcceptedByVirtual is the in-memory index of txIDs that were accepted
// by the current virtual
var txsAcceptedByVirtual map[daghash.TxID]bool
// -----------------------------------------------------------------------------
// The transaction index consists of an entry for every transaction in the DAG.
//
// There are two buckets used in total. The first bucket maps the hash of
// each transaction to its location in each block it's included in. The second bucket
// contains all of the blocks from whose viewpoint the transaction has been
// accepted (i.e. the transaction is found in their blue set without double spends),
// each mapped to the blue block (or the block itself) that included the transaction.
//
// NOTE: Although it is technically possible for multiple transactions to have
// the same hash as long as the previous transaction with the same hash is fully
// spent, this code only stores the most recent one because doing otherwise
// would add a non-trivial amount of space and overhead for something that will
// realistically never happen given the probabilities, and even if it did, the
// old one must be fully spent, so the most likely transaction a caller would
// want for a given hash is the most recent one anyway.
//
// The including blocks index contains a sub-bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <block id> = <start offset><tx length>
//
// Field Type Size
// block id uint64 8 bytes
// start offset uint32 4 bytes
// tx length uint32 4 bytes
// -----
// Total: 16 bytes
//
// The accepting blocks index contains a sub-bucket for each transaction hash (32 bytes each), whose serialized format is:
//
// <accepting block id> = <including block id>
//
// Field Type Size
// accepting block id uint64 8 bytes
// including block id uint64 8 bytes
// -----
// Total: 16 bytes
//
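// For example (illustrative values only): if transaction T is included in
// the block with block id 7 at byte offset 100 with length 250, and the
// block with block id 9 accepts T via that inclusion, the buckets hold:
//
//   includingblocksidx[T][7] = (100, 250)
//   acceptingblocksidx[T][9] = 7
//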
// -----------------------------------------------------------------------------
func putIncludingBlocksEntry(target []byte, txLoc wire.TxLoc) {
byteOrder.PutUint32(target, uint32(txLoc.TxStart))
byteOrder.PutUint32(target[4:], uint32(txLoc.TxLen))
}
func dbPutIncludingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(includingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
func dbPutAcceptingBlocksEntry(dbTx database.Tx, txID *daghash.TxID, blockID uint64, serializedData []byte) error {
bucket, err := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).CreateBucketIfNotExists(txID[:])
if err != nil {
return err
}
return bucket.Put(blockdag.SerializeBlockID(blockID), serializedData)
}
// dbFetchFirstTxRegion uses an existing database transaction to fetch the block
// region for the provided transaction hash from the transaction index. When
// there is no entry for the provided hash, nil will be returned for both
// the region and the error.
//
// Note: because the transaction can be found in multiple blocks, this function arbitrarily
// returns the first block region that is stored in the txindex.
func dbFetchFirstTxRegion(dbTx database.Tx, txID *daghash.TxID) (*database.BlockRegion, error) {
// Load the record from the database and return now if it doesn't exist.
txBucket := dbTx.Metadata().Bucket(includingBlocksIndexKey).Bucket(txID[:])
if txBucket == nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
cursor := txBucket.Cursor()
if ok := cursor.First(); !ok {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("No block region "+
"was found for %s", txID),
}
}
serializedBlockID := cursor.Key()
serializedData := cursor.Value()
if len(serializedData) == 0 {
return nil, nil
}
// Ensure the serialized data has enough bytes to properly deserialize.
if len(serializedData) < includingBlocksIndexKeyEntrySize {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s", txID),
}
}
// Load the block hash associated with the block ID.
hash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, serializedBlockID)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt transaction index "+
"entry for %s: %s", txID, err),
}
}
// Deserialize the final entry.
region := database.BlockRegion{Hash: &daghash.Hash{}}
copy(region.Hash[:], hash[:])
region.Offset = byteOrder.Uint32(serializedData[:4])
region.Len = byteOrder.Uint32(serializedData[4:])
return &region, nil
}
// dbAddTxIndexEntries uses an existing database transaction to add a
// transaction index entry for every transaction in the passed block.
func dbAddTxIndexEntries(dbTx database.Tx, block *util.Block, blockID uint64, multiBlockTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// The offset and length of the transactions within the serialized
// block.
txLocs, err := block.TxLoc()
if err != nil {
return err
}
// As an optimization, allocate a single slice big enough to hold all
// of the serialized transaction index entries for the block and
// serialize them directly into the slice. Then, pass the appropriate
// subslice to the database to be written. This approach significantly
// cuts down on the number of required allocations.
includingBlocksOffset := 0
serializedIncludingBlocksValues := make([]byte, len(block.Transactions())*includingBlocksIndexKeyEntrySize)
for i, tx := range block.Transactions() {
putIncludingBlocksEntry(serializedIncludingBlocksValues[includingBlocksOffset:], txLocs[i])
endOffset := includingBlocksOffset + includingBlocksIndexKeyEntrySize
err := dbPutIncludingBlocksEntry(dbTx, tx.ID(), blockID,
serializedIncludingBlocksValues[includingBlocksOffset:endOffset:endOffset])
if err != nil {
return err
}
includingBlocksOffset += includingBlocksIndexKeyEntrySize
}
for _, blockTxsAcceptanceData := range multiBlockTxsAcceptanceData {
var includingBlockID uint64
if blockTxsAcceptanceData.BlockHash.IsEqual(block.Hash()) {
includingBlockID = blockID
} else {
includingBlockID, err = blockdag.DBFetchBlockIDByHash(dbTx, &blockTxsAcceptanceData.BlockHash)
if err != nil {
return err
}
}
serializedIncludingBlockID := blockdag.SerializeBlockID(includingBlockID)
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
if !txAcceptanceData.IsAccepted {
continue
}
err = dbPutAcceptingBlocksEntry(dbTx, txAcceptanceData.Tx.ID(), blockID, serializedIncludingBlockID)
if err != nil {
return err
}
}
}
return nil
}
func updateTxsAcceptedByVirtual(virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
// Initialize a new txsAcceptedByVirtual
entries := 0
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
entries += len(blockTxsAcceptanceData.TxAcceptanceData)
}
txsAcceptedByVirtual = make(map[daghash.TxID]bool, entries)
// Copy virtualTxsAcceptanceData to txsAcceptedByVirtual
for _, blockTxsAcceptanceData := range virtualTxsAcceptanceData {
for _, txAcceptanceData := range blockTxsAcceptanceData.TxAcceptanceData {
txsAcceptedByVirtual[*txAcceptanceData.Tx.ID()] = true
}
}
return nil
}
// TxIndex implements a transaction by hash index. That is to say, it supports
// querying all transactions by their hash.
type TxIndex struct {
db database.DB
}
// Ensure the TxIndex type implements the Indexer interface.
var _ Indexer = (*TxIndex)(nil)
// Init initializes the hash-based transaction index. In particular, it
// builds the in-memory index of transactions accepted by the current
// virtual.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Init(db database.DB, dag *blockdag.BlockDAG) error {
idx.db = db
// Initialize the txsAcceptedByVirtual index
virtualTxsAcceptanceData, err := dag.TxsAcceptedByVirtual()
if err != nil {
return err
}
err = updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Key() []byte {
return includingBlocksIndexKey
}
// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Name() string {
return txIndexName
}
// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the buckets for the hash-based
// transaction index and the internal block ID indexes.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Create(dbTx database.Tx) error {
meta := dbTx.Metadata()
if _, err := meta.CreateBucket(includingBlocksIndexKey); err != nil {
return err
}
_, err := meta.CreateBucket(acceptingBlocksIndexKey)
return err
}
// ConnectBlock is invoked by the index manager when a new block has been
// connected to the DAG. This indexer adds a hash-to-transaction mapping
// for every transaction in the passed block.
//
// This is part of the Indexer interface.
func (idx *TxIndex) ConnectBlock(dbTx database.Tx, block *util.Block, blockID uint64, dag *blockdag.BlockDAG,
acceptedTxsData blockdag.MultiBlockTxsAcceptanceData, virtualTxsAcceptanceData blockdag.MultiBlockTxsAcceptanceData) error {
if err := dbAddTxIndexEntries(dbTx, block, blockID, acceptedTxsData); err != nil {
return err
}
err := updateTxsAcceptedByVirtual(virtualTxsAcceptanceData)
if err != nil {
return err
}
return nil
}
// TxFirstBlockRegion returns the first block region for the provided transaction hash
// from the transaction index. The block region can in turn be used to load the
// raw transaction bytes. When there is no entry for the provided hash, nil
// will be returned for both the entry and the error.
//
// This function is safe for concurrent access.
func (idx *TxIndex) TxFirstBlockRegion(txID *daghash.TxID) (*database.BlockRegion, error) {
var region *database.BlockRegion
err := idx.db.View(func(dbTx database.Tx) error {
var err error
region, err = dbFetchFirstTxRegion(dbTx, txID)
return err
})
return region, err
}
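The region is only useful together with the serialized block bytes, as the test file further down demonstrates by slicing the transaction out of the block. A hedged sketch of the same pattern, where fetchBlockBytes is a hypothetical helper that returns the serialized block for a given hash:

// rawTxBytes loads the serialized bytes of a transaction via its first
// block region.
func rawTxBytes(txIndex *TxIndex, txID *daghash.TxID) ([]byte, error) {
	region, err := txIndex.TxFirstBlockRegion(txID)
	if err != nil || region == nil {
		return nil, err
	}
	blockBytes, err := fetchBlockBytes(region.Hash)
	if err != nil {
		return nil, err
	}
	return blockBytes[region.Offset : region.Offset+region.Len], nil
}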
// BlockThatAcceptedTx returns the hash of the block where the transaction got accepted (from the virtual block's point of view)
func (idx *TxIndex) BlockThatAcceptedTx(dag *blockdag.BlockDAG, txID *daghash.TxID) (*daghash.Hash, error) {
var acceptingBlock *daghash.Hash
err := idx.db.View(func(dbTx database.Tx) error {
var err error
acceptingBlock, err = dbFetchTxAcceptingBlock(dbTx, txID, dag)
return err
})
return acceptingBlock, err
}
func dbFetchTxAcceptingBlock(dbTx database.Tx, txID *daghash.TxID, dag *blockdag.BlockDAG) (*daghash.Hash, error) {
// If the transaction was accepted by the current virtual,
// return the zeroHash immediately
if _, ok := txsAcceptedByVirtual[*txID]; ok {
return &daghash.ZeroHash, nil
}
bucket := dbTx.Metadata().Bucket(acceptingBlocksIndexKey).Bucket(txID[:])
if bucket == nil {
return nil, nil
}
cursor := bucket.Cursor()
if !cursor.First() {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("Accepting blocks bucket is "+
"empty for %s", txID),
}
}
for ; cursor.Key() != nil; cursor.Next() {
blockHash, err := blockdag.DBFetchBlockHashBySerializedID(dbTx, cursor.Key())
if err != nil {
return nil, err
}
isBlockInSelectedParentChain, err := dag.IsInSelectedParentChain(blockHash)
if err != nil {
return nil, err
}
if isBlockInSelectedParentChain {
return blockHash, nil
}
}
return nil, nil
}
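Callers thus have three outcomes to distinguish: a nil hash (not accepted by the selected parent chain), daghash.ZeroHash (accepted only by the current virtual), or a concrete block hash. A sketch of that branching, assuming txIndex, dag, txID, and log are in scope:

acceptingBlock, err := txIndex.BlockThatAcceptedTx(dag, txID)
if err != nil {
	return err
}
switch {
case acceptingBlock == nil:
	log.Infof("transaction %s is not accepted by the selected parent chain", txID)
case acceptingBlock.IsEqual(&daghash.ZeroHash):
	log.Infof("transaction %s is so far accepted only by the virtual", txID)
default:
	log.Infof("transaction %s was accepted in block %s", txID, acceptingBlock)
}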
// NewTxIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all transactions in the blockDAG to the respective
// block, location within the block, and size of the transaction.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockdag package. This allows the index to be
// seamlessly maintained along with the DAG.
func NewTxIndex() *TxIndex {
return &TxIndex{}
}
// DropTxIndex drops the transaction index from the provided database if it
// exists. Since the address index relies on it, the address index will also be
// dropped when it exists.
func DropTxIndex(db database.DB, interrupt <-chan struct{}) error {
err := dropIndex(db, addrIndexKey, addrIndexName, interrupt)
if err != nil {
return err
}
err = dropIndex(db, includingBlocksIndexKey, txIndexName, interrupt)
if err != nil {
return err
}
return dropIndex(db, acceptingBlocksIndexKey, txIndexName, interrupt)
}
// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *TxIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
return errors.Errorf("txindex was turned off for %d blocks and can't be recovered."+
" To resume working drop the txindex with --droptxindex", lastKnownBlockID-currentBlockID)
}


@@ -1,144 +0,0 @@
package indexers
import (
"bytes"
"reflect"
"testing"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/mining"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
func createTransaction(t *testing.T, value uint64, originTx *wire.MsgTx, outputIndex uint32) *wire.MsgTx {
signatureScript, err := txscript.PayToScriptHashSignatureScript(blockdag.OpTrueScript, nil)
if err != nil {
t.Fatalf("Error creating signature script: %s", err)
}
txIn := &wire.TxIn{
PreviousOutpoint: wire.Outpoint{
TxID: *originTx.TxID(),
Index: outputIndex,
},
Sequence: wire.MaxTxInSequenceNum,
SignatureScript: signatureScript,
}
txOut := wire.NewTxOut(value, blockdag.OpTrueScript)
tx := wire.NewNativeMsgTx(wire.TxVersion, []*wire.TxIn{txIn}, []*wire.TxOut{txOut})
return tx
}
func TestTxIndexConnectBlock(t *testing.T) {
blocks := make(map[daghash.Hash]*util.Block)
txIndex := NewTxIndex()
indexManager := NewManager([]Indexer{txIndex})
params := dagconfig.SimnetParams
params.BlockCoinbaseMaturity = 0
params.K = 1
config := blockdag.Config{
IndexManager: indexManager,
DAGParams: &params,
}
dag, teardown, err := blockdag.DAGSetup("TestTxIndexConnectBlock", config)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Failed to setup DAG instance: %v", err)
}
if teardown != nil {
defer teardown()
}
prepareAndProcessBlock := func(parentHashes []*daghash.Hash, transactions []*wire.MsgTx, blockName string) *wire.MsgBlock {
block, err := mining.PrepareBlockForTest(dag, &params, parentHashes, transactions, false)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: block %v got unexpected error from PrepareBlockForTest: %v", blockName, err)
}
utilBlock := util.NewBlock(block)
blocks[*block.BlockHash()] = utilBlock
isOrphan, isDelayed, err := dag.ProcessBlock(utilBlock, blockdag.BFNoPoWCheck)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: dag.ProcessBlock got unexpected error for block %v: %v", blockName, err)
}
if isDelayed {
t.Fatalf("TestTxIndexConnectBlock: block %s "+
"is too far in the future", blockName)
}
if isOrphan {
t.Fatalf("TestTxIndexConnectBlock: block %v was unexpectedly orphan", blockName)
}
return block
}
block1 := prepareAndProcessBlock([]*daghash.Hash{params.GenesisHash}, nil, "1")
block2Tx := createTransaction(t, block1.Transactions[0].TxOut[0].Value, block1.Transactions[0], 0)
block2 := prepareAndProcessBlock([]*daghash.Hash{block1.BlockHash()}, []*wire.MsgTx{block2Tx}, "2")
block3Tx := createTransaction(t, block2.Transactions[0].TxOut[0].Value, block2.Transactions[0], 0)
block3 := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3")
block2TxID := block2Tx.TxID()
block2TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
block3Hash := block3.BlockHash()
if !block2TxNewAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxNewAcceptedBlock)
}
block3TxID := block3Tx.TxID()
block3TxNewAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block3TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block3TxNewAcceptedBlock.IsEqual(&daghash.ZeroHash) {
t.Errorf("TestTxIndexConnectBlock: block3Tx should've "+
"been accepted by the virtual block but instead got accepted in block %v", block3TxNewAcceptedBlock)
}
block3A := prepareAndProcessBlock([]*daghash.Hash{block2.BlockHash()}, []*wire.MsgTx{block3Tx}, "3A")
block4 := prepareAndProcessBlock([]*daghash.Hash{block3.BlockHash()}, nil, "4")
prepareAndProcessBlock([]*daghash.Hash{block3A.BlockHash(), block4.BlockHash()}, nil, "5")
block2TxAcceptedBlock, err := txIndex.BlockThatAcceptedTx(dag, block2TxID)
if err != nil {
t.Errorf("TestTxIndexConnectBlock: TxAcceptedInBlock: %v", err)
}
if !block2TxAcceptedBlock.IsEqual(block3Hash) {
t.Errorf("TestTxIndexConnectBlock: block2Tx should've "+
"been accepted in block %v but instead got accepted in block %v", block3Hash, block2TxAcceptedBlock)
}
region, err := txIndex.TxFirstBlockRegion(block3TxID)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: no block region was found for block3Tx")
}
regionBlock, ok := blocks[*region.Hash]
if !ok {
t.Fatalf("TestTxIndexConnectBlock: couldn't find block with hash %v", region.Hash)
}
regionBlockBytes, err := regionBlock.Bytes()
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't serialize block to bytes")
}
block3TxInBlock := regionBlockBytes[region.Offset : region.Offset+region.Len]
block3TxBuf := bytes.NewBuffer(make([]byte, 0, block3Tx.SerializeSize()))
err = block3Tx.KaspaEncode(block3TxBuf, 0)
if err != nil {
t.Fatalf("TestTxIndexConnectBlock: Couldn't encode block3Tx: %v", err)
}
blockTxBytes := block3TxBuf.Bytes()
if !reflect.DeepEqual(blockTxBytes, block3TxInBlock) {
t.Errorf("TestTxIndexConnectBlock: the block region that was in the bucket doesn't match block3Tx")
}
}

View File

@ -17,8 +17,8 @@ import (
)
const (
// blockDbNamePrefix is the prefix for the kaspad block database.
blockDbNamePrefix = "blocks"
// blockDBNamePrefix is the prefix for the kaspad block database.
blockDBNamePrefix = "blocks"
)
var (
@ -30,11 +30,11 @@ var (
// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (database.DB, error) {
// The database name is based on the database type.
dbName := blockDbNamePrefix + "_" + cfg.DbType
dbName := blockDBNamePrefix + "_" + cfg.DBType
dbPath := filepath.Join(cfg.DataDir, dbName)
log.Infof("Loading block database from '%s'", dbPath)
db, err := database.Open(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
db, err := database.Open(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
// Return the error if it's not because the database doesn't
// exist.
@ -50,7 +50,7 @@ func loadBlockDB() (database.DB, error) {
if err != nil {
return nil, err
}
db, err = database.Create(cfg.DbType, dbPath, ActiveConfig().NetParams().Net)
db, err = database.Create(cfg.DBType, dbPath, ActiveConfig().NetParams().Net)
if err != nil {
return nil, err
}

View File

@ -19,7 +19,7 @@ import (
)
const (
defaultDbType = "ffldb"
defaultDBType = "ffldb"
defaultDataFile = "bootstrap.dat"
defaultProgress = 10
)
@ -40,12 +40,11 @@ func ActiveConfig() *ConfigFlags {
//
// See loadConfig for details on the configuration load process.
type ConfigFlags struct {
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
AddrIndex bool `long:"addrindex" description:"Build a full address-based transaction index which makes the searchrawtransactions RPC available"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
DataDir string `short:"b" long:"datadir" description:"Location of the kaspad data directory"`
DBType string `long:"dbtype" description:"Database backend to use for the Block DAG"`
InFile string `short:"i" long:"infile" description:"File containing the block(s)"`
Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
config.NetworkFlags
}
@ -75,7 +74,7 @@ func loadConfig() (*ConfigFlags, []string, error) {
// Default config.
activeConfig = &ConfigFlags{
DataDir: defaultDataDir,
DbType: defaultDbType,
DBType: defaultDBType,
InFile: defaultDataFile,
Progress: defaultProgress,
}
@ -97,10 +96,10 @@ func loadConfig() (*ConfigFlags, []string, error) {
}
// Validate database type.
if !validDbType(activeConfig.DbType) {
if !validDbType(activeConfig.DBType) {
str := "%s: The specified database type [%s] is invalid -- " +
"supported types %s"
err := errors.Errorf(str, "loadConfig", activeConfig.DbType, strings.Join(knownDbTypes, ", "))
err := errors.Errorf(str, "loadConfig", activeConfig.DBType, strings.Join(knownDbTypes, ", "))
fmt.Fprintln(os.Stderr, err)
parser.WriteHelp(os.Stderr)
return nil, nil, err

View File

@ -6,13 +6,13 @@ package main
import (
"encoding/binary"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/pkg/errors"
"io"
"sync"
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/wire"
@ -288,28 +288,11 @@ func (bi *blockImporter) Import() chan *importResults {
// newBlockImporter returns a new importer for the provided file reader seeker
// and database.
func newBlockImporter(db database.DB, r io.ReadSeeker) (*blockImporter, error) {
// Create the transaction and address indexes if needed.
//
// CAUTION: the txindex needs to be first in the indexes array because
// the addrindex uses data from the txindex during catchup. If the
// addrindex is run first, it may not have the transactions from the
// current block indexed.
// Create the acceptance index if needed.
var indexes []indexers.Indexer
if cfg.TxIndex || cfg.AddrIndex {
// Enable transaction index if address index is enabled since it
// requires it.
if !cfg.TxIndex {
log.Infof("Transaction index enabled because it is " +
"required by the address index")
cfg.TxIndex = true
} else {
log.Info("Transaction index is enabled")
}
indexes = append(indexes, indexers.NewTxIndex())
}
if cfg.AddrIndex {
log.Info("Address index is enabled")
indexes = append(indexes, indexers.NewAddrIndex(ActiveConfig().NetParams()))
if cfg.AcceptanceIndex {
log.Info("Acceptance index is enabled")
indexes = append(indexes, indexers.NewAcceptanceIndex())
}
// Create an index manager if any of the optional indexes are enabled.
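The code following this comment is elided by the diff. A sketch of what it presumably does, based on the NewManager constructor used elsewhere in this commit; the nil-when-empty behavior is an assumption:

// Assumed continuation: only build an IndexManager when at least one
// optional index is enabled; otherwise leave it nil.
var indexManager blockdag.IndexManager
if len(indexes) > 0 {
	indexManager = indexers.NewManager(indexes)
}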

View File

@ -56,8 +56,6 @@ const (
DefaultMaxOrphanTxSize = 100000
defaultSigCacheMaxSize = 100000
sampleConfigFilename = "sample-kaspad.conf"
defaultTxIndex = false
defaultAddrIndex = false
defaultAcceptanceIndex = false
)
@ -128,11 +126,7 @@ type Flags struct {
NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."`
TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
DropTxIndex bool `long:"droptxindex" description:"Deletes the hash-based transaction index from the database on start up and then exits."`
AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"`
DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up and then exits."`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainByBlock RPC available"`
AcceptanceIndex bool `long:"acceptanceindex" description:"Maintain a full hash-based acceptance index which makes the getChainFromBlock RPC available"`
DropAcceptanceIndex bool `long:"dropacceptanceindex" description:"Deletes the hash-based acceptance index from the database on start up and then exits."`
RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."`
RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."`
@ -248,8 +242,6 @@ func loadConfig() (*Config, []string, error) {
MaxOrphanTxs: defaultMaxOrphanTransactions,
SigCacheMaxSize: defaultSigCacheMaxSize,
MinRelayTxFee: defaultMinRelayTxFee,
TxIndex: defaultTxIndex,
AddrIndex: defaultAddrIndex,
AcceptanceIndex: defaultAcceptanceIndex,
}
@ -631,38 +623,6 @@ func loadConfig() (*Config, []string, error) {
}
}
// --txindex and --droptxindex do not mix.
if activeConfig.TxIndex && activeConfig.DropTxIndex {
err := errors.Errorf("%s: the --txindex and --droptxindex "+
"options may not be activated at the same time",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --addrindex and --dropaddrindex do not mix.
if activeConfig.AddrIndex && activeConfig.DropAddrIndex {
err := errors.Errorf("%s: the --addrindex and --dropaddrindex "+
"options may not be activated at the same time",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --addrindex and --droptxindex do not mix.
if activeConfig.AddrIndex && activeConfig.DropTxIndex {
err := errors.Errorf("%s: the --addrindex and --droptxindex "+
"options may not be activated at the same time "+
"because the address index relies on the transaction "+
"index",
funcName)
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// --acceptanceindex and --dropacceptanceindex do not mix.
if activeConfig.AcceptanceIndex && activeConfig.DropAcceptanceIndex {
err := errors.Errorf("%s: the --acceptanceindex and --dropacceptanceindex "+

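The hunk above removes three instances of the same validation and keeps a fourth (truncated here). All of them instantiate one rule: an index's enable flag and its drop flag are mutually exclusive. A hypothetical helper capturing that pattern, using the same errors package as the surrounding code:

// checkExclusiveFlags is a hypothetical refactoring of the repeated
// validation above: enabling an index and dropping it cannot coexist.
func checkExclusiveFlags(funcName, enableFlag, dropFlag string, enable, drop bool) error {
	if enable && drop {
		return errors.Errorf("%s: the %s and %s options may not be "+
			"activated at the same time", funcName, enableFlag, dropFlag)
	}
	return nil
}

A call such as checkExclusiveFlags(funcName, "--acceptanceindex", "--dropacceptanceindex", activeConfig.AcceptanceIndex, activeConfig.DropAcceptanceIndex) would then replace each inlined block.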
View File

@ -130,25 +130,6 @@ func kaspadMain(serverChan chan<- *server.Server) error {
}
// Drop indexes and exit if requested.
//
// NOTE: The order is important here because dropping the tx index also
// drops the address index since it relies on it.
if cfg.DropAddrIndex {
if err := indexers.DropAddrIndex(db, interrupt); err != nil {
kasdLog.Errorf("%s", err)
return err
}
return nil
}
if cfg.DropTxIndex {
if err := indexers.DropTxIndex(db, interrupt); err != nil {
kasdLog.Errorf("%s", err)
return err
}
return nil
}
if cfg.DropAcceptanceIndex {
if err := indexers.DropAcceptanceIndex(db, interrupt); err != nil {
kasdLog.Errorf("%s", err)

View File

@ -13,7 +13,6 @@ import (
"time"
"github.com/kaspanet/kaspad/blockdag"
"github.com/kaspanet/kaspad/blockdag/indexers"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/logger"
"github.com/kaspanet/kaspad/mining"
@ -77,11 +76,6 @@ type Config struct {
// SigCache defines a signature cache to use.
SigCache *txscript.SigCache
// AddrIndex defines the optional address index instance to use for
// indexing the unconfirmed transactions in the memory pool.
// This can be nil if the address index is not enabled.
AddrIndex *indexers.AddrIndex
// DAG is the BlockDAG we want to use (mainly for UTXO checks)
DAG *blockdag.BlockDAG
}
@ -531,12 +525,6 @@ func (mp *TxPool) removeTransaction(tx *util.Tx, removeDependants bool, restoreI
func (mp *TxPool) removeTransactionWithDiff(tx *util.Tx, diff *blockdag.UTXODiff, restoreInputs bool) error {
txID := tx.ID()
// Remove unconfirmed address index entries associated with the
// transaction if enabled.
if mp.cfg.AddrIndex != nil {
mp.cfg.AddrIndex.RemoveUnconfirmedTx(txID)
}
err := mp.removeTransactionUTXOEntriesFromDiff(tx, diff)
if err != nil {
return errors.Errorf("could not remove UTXOEntry from diff: %s", err)
@ -712,12 +700,6 @@ func (mp *TxPool) addTransaction(tx *util.Tx, fee uint64, parentsInPool []*wire.
}
atomic.StoreInt64(&mp.lastUpdated, time.Now().Unix())
// Add unconfirmed address index entries associated with the transaction
// if enabled.
if mp.cfg.AddrIndex != nil {
mp.cfg.AddrIndex.AddUnconfirmedTx(tx, mp.mpUTXOSet)
}
return txD, nil
}

View File

@ -351,7 +351,6 @@ func newPoolHarness(t *testing.T, dagParams *dagconfig.Params, numOutputs uint32
MedianTimePast: fDAG.MedianTimePast,
CalcSequenceLockNoLock: calcSequenceLock,
SigCache: nil,
AddrIndex: nil,
}),
}

View File

@ -8,116 +8,12 @@ import (
"bytes"
"encoding/hex"
"encoding/json"
"github.com/kaspanet/kaspad/util/pointers"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// FutureGetRawTransactionResult is a future promise to deliver the result of a
// GetRawTransactionAsync RPC invocation (or an applicable error).
type FutureGetRawTransactionResult chan *response
// Receive waits for the response promised by the future and returns a
// transaction given its hash.
func (r FutureGetRawTransactionResult) Receive() (*util.Tx, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
// Unmarshal result as a string.
var txHex string
err = json.Unmarshal(res, &txHex)
if err != nil {
return nil, err
}
// Decode the serialized transaction hex to raw bytes.
serializedTx, err := hex.DecodeString(txHex)
if err != nil {
return nil, err
}
// Deserialize the transaction and return it.
var msgTx wire.MsgTx
if err := msgTx.Deserialize(bytes.NewReader(serializedTx)); err != nil {
return nil, err
}
return util.NewTx(&msgTx), nil
}
// GetRawTransactionAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See GetRawTransaction for the blocking version and more details.
func (c *Client) GetRawTransactionAsync(txID *daghash.TxID) FutureGetRawTransactionResult {
id := ""
if txID != nil {
id = txID.String()
}
cmd := rpcmodel.NewGetRawTransactionCmd(id, pointers.Int(0))
return c.sendCmd(cmd)
}
// GetRawTransaction returns a transaction given its ID.
//
// See GetRawTransactionVerbose to obtain additional information about the
// transaction.
func (c *Client) GetRawTransaction(txID *daghash.TxID) (*util.Tx, error) {
return c.GetRawTransactionAsync(txID).Receive()
}
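The Async/Receive split above is the client's futures pattern: the Async call sends the request and returns immediately with a channel-backed future, and Receive blocks until the response arrives. A usage sketch, assuming a connected *Client and two known transaction IDs:

// Pipeline two requests over the same connection, then collect both
// results. client, txID1, and txID2 are assumed to exist.
future1 := client.GetRawTransactionAsync(txID1)
future2 := client.GetRawTransactionAsync(txID2)
tx1, err := future1.Receive()
if err != nil {
	return err
}
tx2, err := future2.Receive()
if err != nil {
	return err
}
log.Infof("fetched transactions %s and %s", tx1.ID(), tx2.ID())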
// FutureGetRawTransactionVerboseResult is a future promise to deliver the
// result of a GetRawTransactionVerboseAsync RPC invocation (or an applicable
// error).
type FutureGetRawTransactionVerboseResult chan *response
// Receive waits for the response promised by the future and returns information
// about a transaction given its hash.
func (r FutureGetRawTransactionVerboseResult) Receive() (*rpcmodel.TxRawResult, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
// Unmarshal result as a getRawTransaction result object.
var rawTxResult rpcmodel.TxRawResult
err = json.Unmarshal(res, &rawTxResult)
if err != nil {
return nil, err
}
return &rawTxResult, nil
}
// GetRawTransactionVerboseAsync returns an instance of a type that can be used
// to get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See GetRawTransactionVerbose for the blocking version and more details.
func (c *Client) GetRawTransactionVerboseAsync(txID *daghash.TxID) FutureGetRawTransactionVerboseResult {
id := ""
if txID != nil {
id = txID.String()
}
cmd := rpcmodel.NewGetRawTransactionCmd(id, pointers.Int(1))
return c.sendCmd(cmd)
}
// GetRawTransactionVerbose returns information about a transaction given
// its hash.
//
// See GetRawTransaction to obtain only the transaction already deserialized.
func (c *Client) GetRawTransactionVerbose(txID *daghash.TxID) (*rpcmodel.TxRawResult, error) {
return c.GetRawTransactionVerboseAsync(txID).Receive()
}
// FutureDecodeRawTransactionResult is a future promise to deliver the result
// of a DecodeRawTransactionAsync RPC invocation (or an applicable error).
type FutureDecodeRawTransactionResult chan *response
@ -264,126 +160,6 @@ func (c *Client) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (*daghas
return c.SendRawTransactionAsync(tx, allowHighFees).Receive()
}
// FutureSearchRawTransactionsResult is a future promise to deliver the result
// of the SearchRawTransactionsAsync RPC invocation (or an applicable error).
type FutureSearchRawTransactionsResult chan *response
// Receive waits for the response promised by the future and returns the
// found raw transactions.
func (r FutureSearchRawTransactionsResult) Receive() ([]*wire.MsgTx, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
// Unmarshal as an array of strings.
var searchRawTxnsResult []string
err = json.Unmarshal(res, &searchRawTxnsResult)
if err != nil {
return nil, err
}
// Decode and deserialize each transaction.
msgTxns := make([]*wire.MsgTx, 0, len(searchRawTxnsResult))
for _, hexTx := range searchRawTxnsResult {
// Decode the serialized transaction hex to raw bytes.
serializedTx, err := hex.DecodeString(hexTx)
if err != nil {
return nil, err
}
// Deserialize the transaction and add it to the result slice.
var msgTx wire.MsgTx
err = msgTx.Deserialize(bytes.NewReader(serializedTx))
if err != nil {
return nil, err
}
msgTxns = append(msgTxns, &msgTx)
}
return msgTxns, nil
}
// SearchRawTransactionsAsync returns an instance of a type that can be used to
// get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See SearchRawTransactions for the blocking version and more details.
func (c *Client) SearchRawTransactionsAsync(address util.Address, skip, count int, reverse bool, filterAddrs []string) FutureSearchRawTransactionsResult {
addr := address.EncodeAddress()
verbose := pointers.Bool(false)
cmd := rpcmodel.NewSearchRawTransactionsCmd(addr, verbose, &skip, &count,
nil, &reverse, &filterAddrs)
return c.sendCmd(cmd)
}
// SearchRawTransactions returns transactions that involve the passed address.
//
// NOTE: RPC servers do not typically provide this capability unless it has
// specifically been enabled.
//
// See SearchRawTransactionsVerbose to retrieve a list of data structures with
// information about the transactions instead of the transactions themselves.
func (c *Client) SearchRawTransactions(address util.Address, skip, count int, reverse bool, filterAddrs []string) ([]*wire.MsgTx, error) {
return c.SearchRawTransactionsAsync(address, skip, count, reverse, filterAddrs).Receive()
}
// FutureSearchRawTransactionsVerboseResult is a future promise to deliver the
// result of the SearchRawTransactionsVerboseAsync RPC invocation (or an
// applicable error).
type FutureSearchRawTransactionsVerboseResult chan *response
// Receive waits for the response promised by the future and returns the
// found raw transactions.
func (r FutureSearchRawTransactionsVerboseResult) Receive() ([]*rpcmodel.SearchRawTransactionsResult, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
// Unmarshal as an array of raw transaction results.
var result []*rpcmodel.SearchRawTransactionsResult
err = json.Unmarshal(res, &result)
if err != nil {
return nil, err
}
return result, nil
}
// SearchRawTransactionsVerboseAsync returns an instance of a type that can be
// used to get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See SearchRawTransactionsVerbose for the blocking version and more details.
func (c *Client) SearchRawTransactionsVerboseAsync(address util.Address, skip,
count int, includePrevOut, reverse bool, filterAddrs *[]string) FutureSearchRawTransactionsVerboseResult {
addr := address.EncodeAddress()
verbose := pointers.Bool(true)
var prevOut *bool
if includePrevOut {
prevOut = pointers.Bool(true)
}
cmd := rpcmodel.NewSearchRawTransactionsCmd(addr, verbose, &skip, &count,
prevOut, &reverse, filterAddrs)
return c.sendCmd(cmd)
}
// SearchRawTransactionsVerbose returns a list of data structures that describe
// transactions which involve the passed address.
//
// NOTE: RPC servers do not typically provide this capability unless it has
// specifically been enabled.
//
// See SearchRawTransactions to retrieve a list of raw transactions instead.
func (c *Client) SearchRawTransactionsVerbose(address util.Address, skip,
count int, includePrevOut, reverse bool, filterAddrs []string) ([]*rpcmodel.SearchRawTransactionsResult, error) {
return c.SearchRawTransactionsVerboseAsync(address, skip, count,
includePrevOut, reverse, &filterAddrs).Receive()
}
// FutureDecodeScriptResult is a future promise to deliver the result
// of a DecodeScriptAsync RPC invocation (or an applicable error).
type FutureDecodeScriptResult chan *response

View File

@ -406,24 +406,6 @@ func NewGetRawMempoolCmd(verbose *bool) *GetRawMempoolCmd {
}
}
// GetRawTransactionCmd defines the getRawTransaction JSON-RPC command.
type GetRawTransactionCmd struct {
TxID string
Verbose *int `jsonrpcdefault:"0"`
}
// NewGetRawTransactionCmd returns a new instance which can be used to issue a
// getRawTransaction JSON-RPC command.
//
// The parameters which are pointers indicate they are optional. Passing nil
// for optional parameters will use the default value.
func NewGetRawTransactionCmd(txID string, verbose *int) *GetRawTransactionCmd {
return &GetRawTransactionCmd{
TxID: txID,
Verbose: verbose,
}
}
// GetSubnetworkCmd defines the getSubnetwork JSON-RPC command.
type GetSubnetworkCmd struct {
SubnetworkID string
@ -491,34 +473,6 @@ func NewPingCmd() *PingCmd {
return &PingCmd{}
}
// SearchRawTransactionsCmd defines the searchRawTransactions JSON-RPC command.
type SearchRawTransactionsCmd struct {
Address string
Verbose *bool `jsonrpcdefault:"true"`
Skip *int `jsonrpcdefault:"0"`
Count *int `jsonrpcdefault:"100"`
VinExtra *bool `jsonrpcdefault:"false"`
Reverse *bool `jsonrpcdefault:"false"`
FilterAddrs *[]string
}
// NewSearchRawTransactionsCmd returns a new instance which can be used to issue a
// searchRawTransactions JSON-RPC command.
//
// The parameters which are pointers indicate they are optional. Passing nil
// for optional parameters will use the default value.
func NewSearchRawTransactionsCmd(address string, verbose *bool, skip, count *int, vinExtra, reverse *bool, filterAddrs *[]string) *SearchRawTransactionsCmd {
return &SearchRawTransactionsCmd{
Address: address,
Verbose: verbose,
Skip: skip,
Count: count,
VinExtra: vinExtra,
Reverse: reverse,
FilterAddrs: filterAddrs,
}
}
// SendRawTransactionCmd defines the sendRawTransaction JSON-RPC command.
type SendRawTransactionCmd struct {
HexTx string
@ -728,14 +682,12 @@ func init() {
MustRegisterCommand("getNetTotals", (*GetNetTotalsCmd)(nil), flags)
MustRegisterCommand("getPeerInfo", (*GetPeerInfoCmd)(nil), flags)
MustRegisterCommand("getRawMempool", (*GetRawMempoolCmd)(nil), flags)
MustRegisterCommand("getRawTransaction", (*GetRawTransactionCmd)(nil), flags)
MustRegisterCommand("getSubnetwork", (*GetSubnetworkCmd)(nil), flags)
MustRegisterCommand("getTxOut", (*GetTxOutCmd)(nil), flags)
MustRegisterCommand("getTxOutSetInfo", (*GetTxOutSetInfoCmd)(nil), flags)
MustRegisterCommand("help", (*HelpCmd)(nil), flags)
MustRegisterCommand("ping", (*PingCmd)(nil), flags)
MustRegisterCommand("removeManualNode", (*RemoveManualNodeCmd)(nil), flags)
MustRegisterCommand("searchRawTransactions", (*SearchRawTransactionsCmd)(nil), flags)
MustRegisterCommand("sendRawTransaction", (*SendRawTransactionCmd)(nil), flags)
MustRegisterCommand("stop", (*StopCmd)(nil), flags)
MustRegisterCommand("submitBlock", (*SubmitBlockCmd)(nil), flags)

View File

@ -480,34 +480,6 @@ func TestRPCServerCommands(t *testing.T) {
Verbose: pointers.Bool(false),
},
},
{
name: "getRawTransaction",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("getRawTransaction", "123")
},
staticCmd: func() interface{} {
return rpcmodel.NewGetRawTransactionCmd("123", nil)
},
marshalled: `{"jsonrpc":"1.0","method":"getRawTransaction","params":["123"],"id":1}`,
unmarshalled: &rpcmodel.GetRawTransactionCmd{
TxID: "123",
Verbose: pointers.Int(0),
},
},
{
name: "getRawTransaction optional",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("getRawTransaction", "123", 1)
},
staticCmd: func() interface{} {
return rpcmodel.NewGetRawTransactionCmd("123", pointers.Int(1))
},
marshalled: `{"jsonrpc":"1.0","method":"getRawTransaction","params":["123",1],"id":1}`,
unmarshalled: &rpcmodel.GetRawTransactionCmd{
TxID: "123",
Verbose: pointers.Int(1),
},
},
{
name: "getSubnetwork",
newCmd: func() (interface{}, error) {
@ -610,145 +582,6 @@ func TestRPCServerCommands(t *testing.T) {
marshalled: `{"jsonrpc":"1.0","method":"removeManualNode","params":["127.0.0.1"],"id":1}`,
unmarshalled: &rpcmodel.RemoveManualNodeCmd{Addr: "127.0.0.1"},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address")
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address", nil, nil, nil, nil, nil, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address"],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(true),
Skip: pointers.Int(0),
Count: pointers.Int(100),
VinExtra: pointers.Bool(false),
Reverse: pointers.Bool(false),
FilterAddrs: nil,
},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address", false)
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address",
pointers.Bool(false), nil, nil, nil, nil, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address",false],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(false),
Skip: pointers.Int(0),
Count: pointers.Int(100),
VinExtra: pointers.Bool(false),
Reverse: pointers.Bool(false),
FilterAddrs: nil,
},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address", false, 5)
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address",
pointers.Bool(false), pointers.Int(5), nil, nil, nil, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address",false,5],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(false),
Skip: pointers.Int(5),
Count: pointers.Int(100),
VinExtra: pointers.Bool(false),
Reverse: pointers.Bool(false),
FilterAddrs: nil,
},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address", false, 5, 10)
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address",
pointers.Bool(false), pointers.Int(5), pointers.Int(10), nil, nil, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address",false,5,10],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(false),
Skip: pointers.Int(5),
Count: pointers.Int(10),
VinExtra: pointers.Bool(false),
Reverse: pointers.Bool(false),
FilterAddrs: nil,
},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address", false, 5, 10, true)
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address",
pointers.Bool(false), pointers.Int(5), pointers.Int(10), pointers.Bool(true), nil, nil)
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address",false,5,10,true],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(false),
Skip: pointers.Int(5),
Count: pointers.Int(10),
VinExtra: pointers.Bool(true),
Reverse: pointers.Bool(false),
FilterAddrs: nil,
},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address", false, 5, 10, true, true)
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address",
pointers.Bool(false), pointers.Int(5), pointers.Int(10), pointers.Bool(true), pointers.Bool(true), nil)
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address",false,5,10,true,true],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(false),
Skip: pointers.Int(5),
Count: pointers.Int(10),
VinExtra: pointers.Bool(true),
Reverse: pointers.Bool(true),
FilterAddrs: nil,
},
},
{
name: "searchRawTransactions",
newCmd: func() (interface{}, error) {
return rpcmodel.NewCommand("searchRawTransactions", "1Address", false, 5, 10, true, true, []string{"1Address"})
},
staticCmd: func() interface{} {
return rpcmodel.NewSearchRawTransactionsCmd("1Address",
pointers.Bool(false), pointers.Int(5), pointers.Int(10), pointers.Bool(true), pointers.Bool(true), &[]string{"1Address"})
},
marshalled: `{"jsonrpc":"1.0","method":"searchRawTransactions","params":["1Address",false,5,10,true,true,["1Address"]],"id":1}`,
unmarshalled: &rpcmodel.SearchRawTransactionsCmd{
Address: "1Address",
Verbose: pointers.Bool(false),
Skip: pointers.Int(5),
Count: pointers.Int(10),
VinExtra: pointers.Bool(true),
Reverse: pointers.Bool(true),
FilterAddrs: &[]string{"1Address"},
},
},
{
name: "sendRawTransaction",
newCmd: func() (interface{}, error) {

View File

@ -304,9 +304,7 @@ type ScriptSig struct {
Hex string `json:"hex"`
}
// Vin models parts of the tx data. It is defined separately since
// getrawtransaction, decoderawtransaction, and searchrawtransaction use the
// same structure.
// Vin models parts of the tx data.
type Vin struct {
TxID string `json:"txId"`
Vout uint32 `json:"vout"`
@ -336,7 +334,7 @@ type PrevOut struct {
Value float64 `json:"value"`
}
// VinPrevOut is like Vin except it includes PrevOut. It is used by searchrawtransaction
// VinPrevOut is like Vin except it includes PrevOut.
type VinPrevOut struct {
Coinbase string `json:"coinbase"`
TxID string `json:"txId"`
@ -380,8 +378,7 @@ func (v *VinPrevOut) MarshalJSON() ([]byte, error) {
return json.Marshal(txStruct)
}
// Vout models parts of the tx data. It is defined separately since both
// getrawtransaction and decoderawtransaction use the same structure.
// Vout models parts of the tx data.
type Vout struct {
Value uint64 `json:"value"`
N uint32 `json:"n"`
@ -411,44 +408,25 @@ type InfoDAGResult struct {
Errors string `json:"errors"`
}
// TxRawResult models the data from the getrawtransaction command.
// TxRawResult models transaction result data.
type TxRawResult struct {
Hex string `json:"hex"`
TxID string `json:"txId"`
Hash string `json:"hash,omitempty"`
Size int32 `json:"size,omitempty"`
Version int32 `json:"version"`
LockTime uint64 `json:"lockTime"`
Subnetwork string `json:"subnetwork"`
Gas uint64 `json:"gas"`
PayloadHash string `json:"payloadHash"`
Payload string `json:"payload"`
Vin []Vin `json:"vin"`
Vout []Vout `json:"vout"`
BlockHash string `json:"blockHash,omitempty"`
Confirmations *uint64 `json:"confirmations,omitempty"`
AcceptedBy *string `json:"acceptedBy,omitempty"`
IsInMempool bool `json:"isInMempool"`
Time uint64 `json:"time,omitempty"`
BlockTime uint64 `json:"blockTime,omitempty"`
}
// SearchRawTransactionsResult models the data from the searchrawtransaction
// command.
type SearchRawTransactionsResult struct {
Hex string `json:"hex,omitempty"`
TxID string `json:"txId"`
Hash string `json:"hash"`
Size string `json:"size"`
Version int32 `json:"version"`
LockTime uint64 `json:"lockTime"`
Vin []VinPrevOut `json:"vin"`
Vout []Vout `json:"vout"`
BlockHash string `json:"blockHash,omitempty"`
Confirmations *uint64 `json:"confirmations,omitempty"`
IsInMempool bool `json:"isInMempool"`
Time uint64 `json:"time,omitempty"`
Blocktime uint64 `json:"blockTime,omitempty"`
Hex string `json:"hex"`
TxID string `json:"txId"`
Hash string `json:"hash,omitempty"`
Size int32 `json:"size,omitempty"`
Version int32 `json:"version"`
LockTime uint64 `json:"lockTime"`
Subnetwork string `json:"subnetwork"`
Gas uint64 `json:"gas"`
PayloadHash string `json:"payloadHash"`
Payload string `json:"payload"`
Vin []Vin `json:"vin"`
Vout []Vout `json:"vout"`
BlockHash string `json:"blockHash,omitempty"`
AcceptedBy *string `json:"acceptedBy,omitempty"`
IsInMempool bool `json:"isInMempool"`
Time uint64 `json:"time,omitempty"`
BlockTime uint64 `json:"blockTime,omitempty"`
}
// TxRawDecodeResult models the data from the decoderawtransaction command.

View File

@ -68,28 +68,26 @@ func TestRPCServerWebsocketNotifications(t *testing.T) {
},
staticNtfn: func() interface{} {
txResult := rpcmodel.TxRawResult{
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkIDNative.String(),
Vin: nil,
Vout: nil,
Confirmations: nil,
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkIDNative.String(),
Vin: nil,
Vout: nil,
}
return rpcmodel.NewTxAcceptedVerboseNtfn(txResult)
},
marshalled: `{"jsonrpc":"1.0","method":"txAcceptedVerbose","params":[{"hex":"001122","txId":"123","version":1,"lockTime":4294967295,"subnetwork":"0000000000000000000000000000000000000000","gas":0,"payloadHash":"","payload":"","vin":null,"vout":null,"isInMempool":false}],"id":null}`,
unmarshalled: &rpcmodel.TxAcceptedVerboseNtfn{
RawTx: rpcmodel.TxRawResult{
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkIDNative.String(),
Vin: nil,
Vout: nil,
Confirmations: nil,
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkIDNative.String(),
Vin: nil,
Vout: nil,
},
},
},
@ -100,34 +98,32 @@ func TestRPCServerWebsocketNotifications(t *testing.T) {
},
staticNtfn: func() interface{} {
txResult := rpcmodel.TxRawResult{
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkID{45, 67}.String(),
PayloadHash: daghash.DoubleHashP([]byte("102030")).String(),
Payload: "102030",
Gas: 10,
Vin: nil,
Vout: nil,
Confirmations: nil,
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkID{45, 67}.String(),
PayloadHash: daghash.DoubleHashP([]byte("102030")).String(),
Payload: "102030",
Gas: 10,
Vin: nil,
Vout: nil,
}
return rpcmodel.NewTxAcceptedVerboseNtfn(txResult)
},
marshalled: `{"jsonrpc":"1.0","method":"txAcceptedVerbose","params":[{"hex":"001122","txId":"123","version":1,"lockTime":4294967295,"subnetwork":"000000000000000000000000000000000000432d","gas":10,"payloadHash":"bf8ccdb364499a3e628200c3d3512c2c2a43b7a7d4f1a40d7f716715e449f442","payload":"102030","vin":null,"vout":null,"isInMempool":false}],"id":null}`,
unmarshalled: &rpcmodel.TxAcceptedVerboseNtfn{
RawTx: rpcmodel.TxRawResult{
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkID{45, 67}.String(),
PayloadHash: daghash.DoubleHashP([]byte("102030")).String(),
Payload: "102030",
Gas: 10,
Vin: nil,
Vout: nil,
Confirmations: nil,
Hex: "001122",
TxID: "123",
Version: 1,
LockTime: 4294967295,
Subnetwork: subnetworkid.SubnetworkID{45, 67}.String(),
PayloadHash: daghash.DoubleHashP([]byte("102030")).String(),
Payload: "102030",
Gas: 10,
Vin: nil,
Vout: nil,
},
},
},

View File

@ -253,8 +253,6 @@ type Server struct {
// if the associated index is not enabled. These fields are set during
// initial creation of the server and never changed afterwards, so they
// do not need to be protected for concurrent access.
TxIndex *indexers.TxIndex
AddrIndex *indexers.AddrIndex
AcceptanceIndex *indexers.AcceptanceIndex
notifyNewTransactions func(txns []*mempool.TxDesc)
@ -1558,32 +1556,8 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
notifyNewTransactions: notifyNewTransactions,
}
// Create the transaction and address indexes if needed.
//
// CAUTION: the txindex needs to be first in the indexes array because
// the addrindex uses data from the txindex during catchup. If the
// addrindex is run first, it may not have the transactions from the
// current block indexed.
// Create indexes if needed.
var indexes []indexers.Indexer
if config.ActiveConfig().TxIndex || config.ActiveConfig().AddrIndex {
// Enable transaction index if address index is enabled since it
// requires it.
if !config.ActiveConfig().TxIndex {
indxLog.Infof("Transaction index enabled because it " +
"is required by the address index")
config.ActiveConfig().TxIndex = true
} else {
indxLog.Info("Transaction index is enabled")
}
s.TxIndex = indexers.NewTxIndex()
indexes = append(indexes, s.TxIndex)
}
if config.ActiveConfig().AddrIndex {
indxLog.Info("Address index is enabled")
s.AddrIndex = indexers.NewAddrIndex(dagParams)
indexes = append(indexes, s.AddrIndex)
}
if config.ActiveConfig().AcceptanceIndex {
indxLog.Info("acceptance index is enabled")
s.AcceptanceIndex = indexers.NewAcceptanceIndex()
@ -1626,7 +1600,6 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
},
IsDeploymentActive: s.DAG.IsDeploymentActive,
SigCache: s.SigCache,
AddrIndex: s.AddrIndex,
DAG: s.DAG,
}
s.TxMemPool = mempool.New(&txC)

View File

@ -11,7 +11,6 @@ import (
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/pointers"
"github.com/kaspanet/kaspad/wire"
"github.com/pkg/errors"
"math/big"
"strconv"
)
@ -142,7 +141,7 @@ func createVoutList(mtx *wire.MsgTx, dagParams *dagconfig.Params, filterAddrMap
// to a raw transaction JSON object.
func createTxRawResult(dagParams *dagconfig.Params, mtx *wire.MsgTx,
txID string, blkHeader *wire.BlockHeader, blkHash string,
acceptingBlock *daghash.Hash, confirmations *uint64, isInMempool bool) (*rpcmodel.TxRawResult, error) {
acceptingBlock *daghash.Hash, isInMempool bool) (*rpcmodel.TxRawResult, error) {
mtxHex, err := messageToHex(mtx)
if err != nil {
@ -175,7 +174,6 @@ func createTxRawResult(dagParams *dagconfig.Params, mtx *wire.MsgTx,
txReply.BlockHash = blkHash
}
txReply.Confirmations = confirmations
txReply.IsInMempool = isInMempool
if acceptingBlock != nil {
txReply.AcceptedBy = pointers.String(acceptingBlock.String())
@ -280,7 +278,7 @@ func buildGetBlockVerboseResult(s *Server, block *util.Block, isVerboseTx bool)
rawTxns := make([]rpcmodel.TxRawResult, len(txns))
for i, tx := range txns {
rawTxn, err := createTxRawResult(params, tx.MsgTx(), tx.ID().String(),
&blockHeader, hash.String(), nil, nil, false)
&blockHeader, hash.String(), nil, false)
if err != nil {
return nil, err
}
@ -352,36 +350,3 @@ func hashesToGetBlockVerboseResults(s *Server, hashes []*daghash.Hash) ([]rpcmod
}
return getBlockVerboseResults, nil
}
// txConfirmationsNoLock returns the number of confirmations for the given transaction.
// The confirmations number is defined as follows:
// If the transaction is in the mempool/in a red block/is a double spend -> 0
// Otherwise -> The confirmations number of the accepting block
//
// This function MUST be called with the DAG state lock held (for reads).
func txConfirmationsNoLock(s *Server, txID *daghash.TxID) (uint64, error) {
if s.cfg.TxIndex == nil {
return 0, errors.New("transaction index must be enabled (--txindex)")
}
acceptingBlock, err := s.cfg.TxIndex.BlockThatAcceptedTx(s.cfg.DAG, txID)
if err != nil {
return 0, errors.Errorf("could not get block that accepted tx %s: %s", txID, err)
}
if acceptingBlock == nil {
return 0, nil
}
confirmations, err := s.cfg.DAG.BlockConfirmationsByHashNoLock(acceptingBlock)
if err != nil {
return 0, errors.Errorf("could not get confirmations for block that accepted tx %s: %s", txID, err)
}
return confirmations, nil
}
func txConfirmations(s *Server, txID *daghash.TxID) (uint64, error) {
s.cfg.DAG.RLock()
defer s.cfg.DAG.RUnlock()
return txConfirmationsNoLock(s, txID)
}
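Under the semantics spelled out in the comment above, a zero result is not an error: it means the transaction is in the mempool, in a red block, or a double spend. Callers therefore had to treat zero as "unconfirmed" rather than as a failure; a hypothetical caller of the removed helper:

confirmations, err := txConfirmations(s, txID)
if err != nil {
	return err
}
if confirmations == 0 {
	// Mempool, red block, or double spend: report as unconfirmed.
	log.Debugf("transaction %s is unconfirmed", txID)
}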

View File

@ -1,140 +0,0 @@
package rpc
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/wire"
)
// handleGetRawTransaction implements the getRawTransaction command.
func handleGetRawTransaction(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
c := cmd.(*rpcmodel.GetRawTransactionCmd)
// Convert the provided transaction hash hex to a Hash.
txID, err := daghash.NewTxIDFromStr(c.TxID)
if err != nil {
return nil, rpcDecodeHexError(c.TxID)
}
verbose := false
if c.Verbose != nil {
verbose = *c.Verbose != 0
}
// Try to fetch the transaction from the memory pool and if that fails,
// try the block database.
var tx *util.Tx
var blkHash *daghash.Hash
isInMempool := false
mempoolTx, err := s.cfg.TxMemPool.FetchTransaction(txID)
if err != nil {
if s.cfg.TxIndex == nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCNoTxInfo,
Message: "The transaction index must be " +
"enabled to query the blockDAG " +
"(specify --txindex)",
}
}
txBytes, txBlockHash, err := fetchTxBytesAndBlockHashFromTxIndex(s, txID)
if err != nil {
return nil, err
}
// When the verbose flag isn't set, simply return the serialized
// transaction as a hex-encoded string. This is done here to
// avoid deserializing it only to reserialize it again later.
if !verbose {
return hex.EncodeToString(txBytes), nil
}
// Grab the block hash.
blkHash = txBlockHash
// Deserialize the transaction
var mtx wire.MsgTx
err = mtx.Deserialize(bytes.NewReader(txBytes))
if err != nil {
context := "Failed to deserialize transaction"
return nil, internalRPCError(err.Error(), context)
}
tx = util.NewTx(&mtx)
} else {
// When the verbose flag isn't set, simply return the
// network-serialized transaction as a hex-encoded string.
if !verbose {
// Note that this is intentionally not directly
// returning because the first return value is a
// string and it would result in returning an empty
// string to the client instead of nothing (nil) in the
// case of an error.
mtxHex, err := messageToHex(mempoolTx.MsgTx())
if err != nil {
return nil, err
}
return mtxHex, nil
}
tx = mempoolTx
isInMempool = true
}
// The verbose flag is set, so generate the JSON object and return it.
var blkHeader *wire.BlockHeader
var blkHashStr string
if blkHash != nil {
// Fetch the header from DAG.
header, err := s.cfg.DAG.HeaderByHash(blkHash)
if err != nil {
context := "Failed to fetch block header"
return nil, internalRPCError(err.Error(), context)
}
blkHeader = header
blkHashStr = blkHash.String()
}
var confirmations uint64
if !isInMempool {
confirmations, err = txConfirmations(s, tx.ID())
if err != nil {
return nil, err
}
}
rawTxn, err := createTxRawResult(s.cfg.DAGParams, tx.MsgTx(), txID.String(),
blkHeader, blkHashStr, nil, &confirmations, isInMempool)
if err != nil {
return nil, err
}
return *rawTxn, nil
}
func fetchTxBytesAndBlockHashFromTxIndex(s *Server, txID *daghash.TxID) ([]byte, *daghash.Hash, error) {
blockRegion, err := s.cfg.TxIndex.TxFirstBlockRegion(txID)
if err != nil {
context := "Failed to retrieve transaction location"
return nil, nil, internalRPCError(err.Error(), context)
}
if blockRegion == nil {
return nil, nil, rpcNoTxInfoError(txID)
}
// Load the raw transaction bytes from the database.
var txBytes []byte
err = s.cfg.DB.View(func(dbTx database.Tx) error {
var err error
txBytes, err = dbTx.FetchBlockRegion(blockRegion)
return err
})
if err != nil {
return nil, nil, rpcNoTxInfoError(txID)
}
return txBytes, blockRegion.Hash, nil
}

View File

@ -78,14 +78,13 @@ func handleGetTxOut(s *Server, cmd interface{}, closeChan <-chan struct{}) (inte
return nil, nil
}
if s.cfg.TxIndex != nil {
txConfirmations, err := txConfirmations(s, txID)
if err != nil {
return nil, internalRPCError("Output index number (vout) does not "+
"exist for transaction.", "")
}
confirmations = &txConfirmations
utxoConfirmations, ok := s.cfg.DAG.UTXOConfirmations(&out)
if !ok {
errStr := fmt.Sprintf("Cannot get confirmations for tx id %s, index %d",
out.TxID, out.Index)
return nil, internalRPCError(errStr, "")
}
confirmations = &utxoConfirmations
selectedTipHash = s.cfg.DAG.SelectedTipHash().String()
value = entry.Amount()
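The replacement path reads confirmations straight out of the UTXO set instead of consulting a transaction index. A condensed sketch of that flow for an arbitrary outpoint, mirroring the code above (txID and index are assumed inputs):

out := wire.Outpoint{TxID: *txID, Index: index}
utxoConfirmations, ok := s.cfg.DAG.UTXOConfirmations(&out)
if !ok {
	// No UTXO entry means the outpoint is unknown or already spent.
	return nil, internalRPCError(fmt.Sprintf(
		"Cannot get confirmations for tx id %s, index %d", out.TxID, out.Index), "")
}
confirmations = &utxoConfirmations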

View File

@ -1,473 +0,0 @@
package rpc
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kaspanet/kaspad/dagconfig"
"github.com/kaspanet/kaspad/database"
"github.com/kaspanet/kaspad/rpcmodel"
"github.com/kaspanet/kaspad/txscript"
"github.com/kaspanet/kaspad/util"
"github.com/kaspanet/kaspad/util/daghash"
"github.com/kaspanet/kaspad/util/pointers"
"github.com/kaspanet/kaspad/wire"
)
// retrievedTx represents a transaction that was either loaded from the
// transaction memory pool or from the database. When a transaction is loaded
// from the database, it is loaded with the raw serialized bytes while the
// mempool has the fully deserialized structure. This structure therefore will
// have one of the two fields set depending on where it was retrieved from.
// This is mainly done for efficiency to avoid extra serialization steps when
// possible.
type retrievedTx struct {
txBytes []byte
blkHash *daghash.Hash // Only set when transaction is in a block.
tx *util.Tx
}
// handleSearchRawTransactions implements the searchRawTransactions command.
func handleSearchRawTransactions(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
// Respond with an error if the address index is not enabled.
addrIndex := s.cfg.AddrIndex
if addrIndex == nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCMisc,
Message: "Address index must be enabled (--addrindex)",
}
}
// Override the flag for including extra previous output information in
// each input if needed.
c := cmd.(*rpcmodel.SearchRawTransactionsCmd)
vinExtra := false
if c.VinExtra != nil {
vinExtra = *c.VinExtra
}
// Including the extra previous output information requires the
// transaction index. Currently the address index relies on the
// transaction index, so this check is redundant, but it's better to be
// safe in case the address index is ever changed to not rely on it.
if vinExtra && s.cfg.TxIndex == nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCMisc,
Message: "Transaction index must be enabled (--txindex)",
}
}
// Attempt to decode the supplied address.
params := s.cfg.DAGParams
addr, err := util.DecodeAddress(c.Address, params.Prefix)
if err != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCInvalidAddressOrKey,
Message: "Invalid address or key: " + err.Error(),
}
}
// Override the default number of requested entries if needed. Also,
// just return now if the number of requested entries is zero to avoid
// extra work.
numRequested := 100
if c.Count != nil {
numRequested = *c.Count
if numRequested < 0 {
numRequested = 1
}
}
if numRequested == 0 {
return nil, nil
}
// Override the default number of entries to skip if needed.
var numToSkip int
if c.Skip != nil {
numToSkip = *c.Skip
if numToSkip < 0 {
numToSkip = 0
}
}
// Override the reverse flag if needed.
var reverse bool
if c.Reverse != nil {
reverse = *c.Reverse
}
// Add transactions from mempool first if client asked for reverse
// order. Otherwise, they will be added last (as needed depending on
// the requested counts).
//
// NOTE: This code doesn't sort by dependency. This might be something
// to do in the future for the client's convenience, or leave it to the
// client.
numSkipped := uint32(0)
addressTxns := make([]retrievedTx, 0, numRequested)
if reverse {
// Transactions in the mempool are not in a block header yet,
// so the block header field in the retrieved transaction struct
// is left nil.
mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
uint32(numToSkip), uint32(numRequested))
numSkipped += mpSkipped
for _, tx := range mpTxns {
addressTxns = append(addressTxns, retrievedTx{tx: tx})
}
}
// Fetch transactions from the database in the desired order if more are
// needed.
if len(addressTxns) < numRequested {
err = s.cfg.DB.View(func(dbTx database.Tx) error {
regions, dbSkipped, err := addrIndex.TxRegionsForAddress(
dbTx, addr, uint32(numToSkip)-numSkipped,
uint32(numRequested-len(addressTxns)), reverse)
if err != nil {
return err
}
// Load the raw transaction bytes from the database.
serializedTxns, err := dbTx.FetchBlockRegions(regions)
if err != nil {
return err
}
// Add the transaction and the hash of the block it is
// contained in to the list. Note that the transaction
// is left serialized here since the caller might have
// requested non-verbose output and hence there would be
// no point in deserializing it just to reserialize it
// later.
for i, serializedTx := range serializedTxns {
addressTxns = append(addressTxns, retrievedTx{
txBytes: serializedTx,
blkHash: regions[i].Hash,
})
}
numSkipped += dbSkipped
return nil
})
if err != nil {
context := "Failed to load address index entries"
return nil, internalRPCError(err.Error(), context)
}
}
// Add transactions from mempool last if client did not request reverse
// order and the number of results is still under the number requested.
if !reverse && len(addressTxns) < numRequested {
// Transactions in the mempool are not in a block header yet,
// so the block header field in the retrieved transaction struct
// is left nil.
mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr,
uint32(numToSkip)-numSkipped, uint32(numRequested-
len(addressTxns)))
numSkipped += mpSkipped
for _, tx := range mpTxns {
addressTxns = append(addressTxns, retrievedTx{tx: tx})
}
}
// Address has never been used if neither source yielded any results.
if len(addressTxns) == 0 {
return []rpcmodel.SearchRawTransactionsResult{}, nil
}
// Serialize all of the transactions to hex.
hexTxns := make([]string, len(addressTxns))
for i := range addressTxns {
// Simply encode the raw bytes to hex when the retrieved
// transaction is already in serialized form.
rtx := &addressTxns[i]
if rtx.txBytes != nil {
hexTxns[i] = hex.EncodeToString(rtx.txBytes)
continue
}
// Serialize the transaction first and convert to hex when the
// retrieved transaction is the deserialized structure.
hexTxns[i], err = messageToHex(rtx.tx.MsgTx())
if err != nil {
return nil, err
}
}
// When not in verbose mode, simply return a list of serialized txns.
if c.Verbose != nil && !*c.Verbose {
return hexTxns, nil
}
// Normalize the provided filter addresses (if any) to ensure there are
// no duplicates.
filterAddrMap := make(map[string]struct{})
if c.FilterAddrs != nil && len(*c.FilterAddrs) > 0 {
for _, addr := range *c.FilterAddrs {
filterAddrMap[addr] = struct{}{}
}
}
// The verbose flag is set, so generate the JSON object and return it.
srtList := make([]rpcmodel.SearchRawTransactionsResult, len(addressTxns))
for i := range addressTxns {
// The deserialized transaction is needed, so deserialize the
// retrieved transaction if it's in serialized form (which will
// be the case when it was looked up from the database).
// Otherwise, use the existing deserialized transaction.
rtx := &addressTxns[i]
var mtx *wire.MsgTx
if rtx.tx == nil {
// Deserialize the transaction.
mtx = new(wire.MsgTx)
err := mtx.Deserialize(bytes.NewReader(rtx.txBytes))
if err != nil {
context := "Failed to deserialize transaction"
return nil, internalRPCError(err.Error(),
context)
}
} else {
mtx = rtx.tx.MsgTx()
}
result := &srtList[i]
result.Hex = hexTxns[i]
result.TxID = mtx.TxID().String()
result.Vin, err = createVinListPrevOut(s, mtx, params, vinExtra,
filterAddrMap)
if err != nil {
return nil, err
}
result.Vout = createVoutList(mtx, params, filterAddrMap)
result.Version = mtx.Version
result.LockTime = mtx.LockTime
// Transactions grabbed from the mempool aren't yet in a block,
// so conditionally fetch block details here. This will be
// reflected in the final JSON output (mempool won't have
// confirmations or block information).
var blkHeader *wire.BlockHeader
var blkHashStr string
if blkHash := rtx.blkHash; blkHash != nil {
// Fetch the header from DAG.
header, err := s.cfg.DAG.HeaderByHash(blkHash)
if err != nil {
return nil, &rpcmodel.RPCError{
Code: rpcmodel.ErrRPCBlockNotFound,
Message: "Block not found",
}
}
blkHeader = header
blkHashStr = blkHash.String()
}
// Add the block information to the result if there is any.
if blkHeader != nil {
result.Time = uint64(blkHeader.Timestamp.Unix())
result.Blocktime = uint64(blkHeader.Timestamp.Unix())
result.BlockHash = blkHashStr
}
// rtx.tx is only set when the transaction was retrieved from the mempool
result.IsInMempool = rtx.tx != nil
if s.cfg.TxIndex != nil && !result.IsInMempool {
confirmations, err := txConfirmations(s, mtx.TxID())
if err != nil {
context := "Failed to obtain block confirmations"
return nil, internalRPCError(err.Error(), context)
}
result.Confirmations = &confirmations
}
}
return srtList, nil
}
// createVinListPrevOut returns a slice of JSON objects for the inputs of the
// passed transaction.
func createVinListPrevOut(s *Server, mtx *wire.MsgTx, dagParams *dagconfig.Params, vinExtra bool, filterAddrMap map[string]struct{}) ([]rpcmodel.VinPrevOut, error) {
// Use a dynamically sized list to accommodate the address filter.
vinList := make([]rpcmodel.VinPrevOut, 0, len(mtx.TxIn))
// Lookup all of the referenced transaction outputs needed to populate the
// previous output information if requested. Coinbase transactions do not contain
// valid inputs: their previous outpoint holds a block hash instead of a transaction ID.
var originOutputs map[wire.Outpoint]wire.TxOut
if !mtx.IsCoinBase() && (vinExtra || len(filterAddrMap) > 0) {
var err error
originOutputs, err = fetchInputTxos(s, mtx)
if err != nil {
return nil, err
}
}
for _, txIn := range mtx.TxIn {
// The disassembled string will contain [error] inline
// if the script doesn't fully parse, so ignore the
// error here.
disbuf, _ := txscript.DisasmString(txIn.SignatureScript)
// Create the basic input entry without the additional optional
// previous output details which will be added later if
// requested and available.
prevOut := &txIn.PreviousOutpoint
vinEntry := rpcmodel.VinPrevOut{
TxID: prevOut.TxID.String(),
Vout: prevOut.Index,
Sequence: txIn.Sequence,
ScriptSig: &rpcmodel.ScriptSig{
Asm: disbuf,
Hex: hex.EncodeToString(txIn.SignatureScript),
},
}
// Add the entry to the list now if it already passed the filter
// since the previous output might not be available.
passesFilter := len(filterAddrMap) == 0
if passesFilter {
vinList = append(vinList, vinEntry)
}
// Only populate previous output information if requested and
// available.
if len(originOutputs) == 0 {
continue
}
originTxOut, ok := originOutputs[*prevOut]
if !ok {
continue
}
// Ignore the error here since an error means the script
// couldn't parse and there is no additional information about
// it anyways.
_, addr, _ := txscript.ExtractScriptPubKeyAddress(
originTxOut.ScriptPubKey, dagParams)
var encodedAddr *string
if addr != nil {
// Encode the address while checking if the address passes the
// filter when needed.
encodedAddr = pointers.String(addr.EncodeAddress())
// If the filter doesn't already pass, make it pass if
// the address exists in the filter.
if _, exists := filterAddrMap[*encodedAddr]; exists {
passesFilter = true
}
}
// Ignore the entry if it doesn't pass the filter.
if !passesFilter {
continue
}
// Add entry to the list if it wasn't already done above.
if len(filterAddrMap) != 0 {
vinList = append(vinList, vinEntry)
}
// Update the entry with previous output information if
// requested.
if vinExtra {
vinListEntry := &vinList[len(vinList)-1]
vinListEntry.PrevOut = &rpcmodel.PrevOut{
Address: encodedAddr,
Value: util.Amount(originTxOut.Value).ToKAS(),
}
}
}
return vinList, nil
}
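// A minimal usage sketch (hypothetical caller, not part of this file):
// request previous-output details for every input with no address
// filtering.
//
//   vinList, err := createVinListPrevOut(s, tx.MsgTx(), s.cfg.DAGParams,
//       true, map[string]struct{}{})
//   if err != nil {
//       return nil, err
//   }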
// fetchInputTxos fetches the outputs from all transactions referenced by the
// inputs to the passed transaction. It checks the transaction mempool first,
// then falls back to the transaction index for transactions already mined
// into blocks.
func fetchInputTxos(s *Server, tx *wire.MsgTx) (map[wire.Outpoint]wire.TxOut, error) {
mp := s.cfg.TxMemPool
originOutputs := make(map[wire.Outpoint]wire.TxOut)
for txInIndex, txIn := range tx.TxIn {
// Attempt to fetch and use the referenced transaction from the
// memory pool.
origin := &txIn.PreviousOutpoint
originTx, err := mp.FetchTransaction(&origin.TxID)
if err == nil {
txOuts := originTx.MsgTx().TxOut
if origin.Index >= uint32(len(txOuts)) {
errStr := fmt.Sprintf("unable to find output "+
"%s referenced from transaction %s:%d",
origin, tx.TxID(), txInIndex)
return nil, internalRPCError(errStr, "")
}
originOutputs[*origin] = *txOuts[origin.Index]
continue
}
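// The transaction is not in the mempool, so fall back to the
// transaction index to locate the block that contains it.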
// Look up the location of the transaction.
blockRegion, err := s.cfg.TxIndex.TxFirstBlockRegion(&origin.TxID)
if err != nil {
context := "Failed to retrieve transaction location"
return nil, internalRPCError(err.Error(), context)
}
if blockRegion == nil {
return nil, rpcNoTxInfoError(&origin.TxID)
}
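// The block region pins down the serialized transaction by block hash
// plus a byte offset and length within that block, so the raw bytes can
// be read back without deserializing the entire block.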
// Load the raw transaction bytes from the database.
var txBytes []byte
err = s.cfg.DB.View(func(dbTx database.Tx) error {
var err error
txBytes, err = dbTx.FetchBlockRegion(blockRegion)
return err
})
if err != nil {
return nil, rpcNoTxInfoError(&origin.TxID)
}
// Deserialize the transaction
var msgTx wire.MsgTx
err = msgTx.Deserialize(bytes.NewReader(txBytes))
if err != nil {
context := "Failed to deserialize transaction"
return nil, internalRPCError(err.Error(), context)
}
// Add the referenced output to the map.
if origin.Index >= uint32(len(msgTx.TxOut)) {
errStr := fmt.Sprintf("unable to find output %s "+
"referenced from transaction %s:%d", origin,
tx.TxID(), txInIndex)
return nil, internalRPCError(errStr, "")
}
originOutputs[*origin] = *msgTx.TxOut[origin.Index]
}
return originOutputs, nil
}
// fetchMempoolTxnsForAddress queries the address index for all unconfirmed
// transactions that involve the provided address. The results will be limited
// by the number to skip and the number requested.
func fetchMempoolTxnsForAddress(s *Server, addr util.Address, numToSkip, numRequested uint32) ([]*util.Tx, uint32) {
// There are no entries to return when fewer entries are available than
// the number being skipped.
mpTxns := s.cfg.AddrIndex.UnconfirmedTxnsForAddress(addr)
numAvailable := uint32(len(mpTxns))
if numToSkip > numAvailable {
return nil, numAvailable
}
// Filter the available entries based on the number to skip and number
// requested.
rangeEnd := numToSkip + numRequested
if rangeEnd > numAvailable {
rangeEnd = numAvailable
}
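// Illustrative example (numbers are hypothetical): with 10 unconfirmed
// transactions, numToSkip=4 and numRequested=8 yield rangeEnd=12, which
// is clamped to 10, so entries 4 through 9 are returned along with the
// skip count.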
return mpTxns[numToSkip:rangeEnd], numToSkip
}

View File

@ -85,14 +85,12 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"getNetTotals": handleGetNetTotals,
"getPeerInfo": handleGetPeerInfo,
"getRawMempool": handleGetRawMempool,
"getRawTransaction": handleGetRawTransaction,
"getSubnetwork": handleGetSubnetwork,
"getTxOut": handleGetTxOut,
"help": handleHelp,
"node": handleNode,
"ping": handlePing,
"removeManualNode": handleRemoveManualNode,
"searchRawTransactions": handleSearchRawTransactions,
"sendRawTransaction": handleSendRawTransaction,
"stop": handleStop,
"submitBlock": handleSubmitBlock,
@ -124,31 +122,29 @@ var rpcLimited = map[string]struct{}{
"help": {},
// HTTP/S-only commands
"createRawTransaction": {},
"decodeRawTransaction": {},
"decodeScript": {},
"getSelectedTip": {},
"getSelectedTipHash": {},
"getBlock": {},
"getBlocks": {},
"getBlockCount": {},
"getBlockHash": {},
"getBlockHeader": {},
"getChainFromBlock": {},
"getCurrentNet": {},
"getDifficulty": {},
"getHeaders": {},
"getInfo": {},
"getNetTotals": {},
"getRawMempool": {},
"getRawTransaction": {},
"getTxOut": {},
"searchRawTransactions": {},
"sendRawTransaction": {},
"submitBlock": {},
"uptime": {},
"validateAddress": {},
"version": {},
"createRawTransaction": {},
"decodeRawTransaction": {},
"decodeScript": {},
"getSelectedTip": {},
"getSelectedTipHash": {},
"getBlock": {},
"getBlocks": {},
"getBlockCount": {},
"getBlockHash": {},
"getBlockHeader": {},
"getChainFromBlock": {},
"getCurrentNet": {},
"getDifficulty": {},
"getHeaders": {},
"getInfo": {},
"getNetTotals": {},
"getRawMempool": {},
"getTxOut": {},
"sendRawTransaction": {},
"submitBlock": {},
"uptime": {},
"validateAddress": {},
"version": {},
}
// handleUnimplemented is the handler for commands that should ultimately be
@ -787,8 +783,6 @@ type rpcserverConfig struct {
// These fields define any optional indexes the RPC server can make use
// of to provide additional data when queried.
TxIndex *indexers.TxIndex
AddrIndex *indexers.AddrIndex
AcceptanceIndex *indexers.AcceptanceIndex
shouldMineOnGenesis func() bool
@ -870,8 +864,6 @@ func NewRPCServer(
DB: db,
TxMemPool: p2pServer.TxMemPool,
Generator: blockTemplateGenerator,
TxIndex: p2pServer.TxIndex,
AddrIndex: p2pServer.AddrIndex,
AcceptanceIndex: p2pServer.AcceptanceIndex,
DAG: p2pServer.DAG,
shouldMineOnGenesis: p2pServer.ShouldMineOnGenesis,

View File

@ -211,40 +211,24 @@ var helpDescsEnUS = map[string]string{
"-status": "A bool which indicates if the soft fork is active",
// TxRawResult help.
"txRawResult-hex": "Hex-encoded transaction",
"txRawResult-txId": "The hash of the transaction",
"txRawResult-version": "The transaction version",
"txRawResult-lockTime": "The transaction lock time",
"txRawResult-subnetwork": "The transaction subnetwork",
"txRawResult-gas": "The transaction gas",
"txRawResult-mass": "The transaction mass",
"txRawResult-payloadHash": "The transaction payload hash",
"txRawResult-payload": "The transaction payload",
"txRawResult-vin": "The transaction inputs as JSON objects",
"txRawResult-vout": "The transaction outputs as JSON objects",
"txRawResult-blockHash": "Hash of the block the transaction is part of",
"txRawResult-confirmations": "Number of confirmations of the block (Will be 'null' if txindex is not disabled)",
"txRawResult-isInMempool": "Whether the transaction is in the mempool",
"txRawResult-time": "Transaction time in seconds since 1 Jan 1970 GMT",
"txRawResult-blockTime": "Block time in seconds since the 1 Jan 1970 GMT",
"txRawResult-size": "The size of the transaction in bytes",
"txRawResult-hash": "The wtxid of the transaction",
"txRawResult-acceptedBy": "The block in which the transaction got accepted in (Will be 'null' if txindex is not disabled)",
// SearchRawTransactionsResult help.
"searchRawTransactionsResult-hex": "Hex-encoded transaction",
"searchRawTransactionsResult-txId": "The hash of the transaction",
"searchRawTransactionsResult-hash": "The wxtid of the transaction",
"searchRawTransactionsResult-version": "The transaction version",
"searchRawTransactionsResult-lockTime": "The transaction lock time",
"searchRawTransactionsResult-vin": "The transaction inputs as JSON objects",
"searchRawTransactionsResult-vout": "The transaction outputs as JSON objects",
"searchRawTransactionsResult-blockHash": "Hash of the block the transaction is part of",
"searchRawTransactionsResult-confirmations": "Number of confirmations of the block (Will be 'null' if txindex is not disabled)",
"searchRawTransactionsResult-isInMempool": "Whether the transaction is in the mempool",
"searchRawTransactionsResult-time": "Transaction time in seconds since 1 Jan 1970 GMT",
"searchRawTransactionsResult-blockTime": "Block time in seconds since the 1 Jan 1970 GMT",
"searchRawTransactionsResult-size": "The size of the transaction in bytes",
"txRawResult-hex": "Hex-encoded transaction",
"txRawResult-txId": "The hash of the transaction",
"txRawResult-version": "The transaction version",
"txRawResult-lockTime": "The transaction lock time",
"txRawResult-subnetwork": "The transaction subnetwork",
"txRawResult-gas": "The transaction gas",
"txRawResult-mass": "The transaction mass",
"txRawResult-payloadHash": "The transaction payload hash",
"txRawResult-payload": "The transaction payload",
"txRawResult-vin": "The transaction inputs as JSON objects",
"txRawResult-vout": "The transaction outputs as JSON objects",
"txRawResult-blockHash": "Hash of the block the transaction is part of",
"txRawResult-isInMempool": "Whether the transaction is in the mempool",
"txRawResult-time": "Transaction time in seconds since 1 Jan 1970 GMT",
"txRawResult-blockTime": "Block time in seconds since the 1 Jan 1970 GMT",
"txRawResult-size": "The size of the transaction in bytes",
"txRawResult-hash": "The wtxid of the transaction",
"txRawResult-acceptedBy": "The block in which the transaction got accepted in",
// GetBlockVerboseResult help.
"getBlockVerboseResult-hash": "The hash of the block (same as provided)",
@ -461,14 +445,6 @@ var helpDescsEnUS = map[string]string{
"getRawMempool--condition1": "verbose=true",
"getRawMempool--result0": "Array of transaction hashes",
// GetRawTransactionCmd help.
"getRawTransaction--synopsis": "Returns information about a transaction given its hash.",
"getRawTransaction-txId": "The hash of the transaction",
"getRawTransaction-verbose": "Specifies the transaction is returned as a JSON object instead of a hex-encoded string",
"getRawTransaction--condition0": "verbose=false",
"getRawTransaction--condition1": "verbose=true",
"getRawTransaction--result0": "Hex-encoded bytes of the serialized transaction",
// GetSubnetworkCmd help.
"getSubnetwork--synopsis": "Returns information about a subnetwork given its ID.",
"getSubnetwork-subnetworkId": "The ID of the subnetwork",
@ -478,7 +454,7 @@ var helpDescsEnUS = map[string]string{
// GetTxOutResult help.
"getTxOutResult-selectedTip": "The block hash that contains the transaction output",
"getTxOutResult-confirmations": "The number of confirmations (Will be 'null' if txindex is not disabled)",
"getTxOutResult-confirmations": "The number of confirmations",
"getTxOutResult-isInMempool": "Whether the transaction is in the mempool",
"getTxOutResult-value": "The transaction amount in KAS",
"getTxOutResult-scriptPubKey": "The public key script used to pay coins as a JSON object",
@ -507,23 +483,6 @@ var helpDescsEnUS = map[string]string{
"removeManualNode--synopsis": "Removes a peer from the manual nodes list",
"removeManualNode-addr": "IP address and port of the peer to remove",
// SearchRawTransactionsCmd help.
"searchRawTransactions--synopsis": "Returns raw data for transactions involving the passed address.\n" +
"Returned transactions are pulled from both the database, and transactions currently in the mempool.\n" +
"Transactions pulled from the mempool will have the 'confirmations' field set to 0.\n" +
"Usage of this RPC requires the optional --addrindex flag to be activated, otherwise all responses will simply return with an error stating the address index has not yet been built.\n" +
"Similarly, until the address index has caught up with the current best height, all requests will return an error response in order to avoid serving stale data.",
"searchRawTransactions-address": "The kaspa address to search for",
"searchRawTransactions-verbose": "Specifies the transaction is returned as a JSON object instead of hex-encoded string",
"searchRawTransactions--condition0": "verbose=0",
"searchRawTransactions--condition1": "verbose=1",
"searchRawTransactions-skip": "The number of leading transactions to leave out of the final response",
"searchRawTransactions-count": "The maximum number of transactions to return",
"searchRawTransactions-vinExtra": "Specify that extra data from previous output will be returned in vin",
"searchRawTransactions-reverse": "Specifies that the transactions should be returned in reverse chronological order",
"searchRawTransactions-filterAddrs": "Address list. Only inputs or outputs with matching address will be returned",
"searchRawTransactions--result0": "Hex-encoded serialized transaction",
// SendRawTransactionCmd help.
"sendRawTransaction--synopsis": "Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.",
"sendRawTransaction-hexTx": "Serialized, hex-encoded signed transaction",
@ -647,14 +606,12 @@ var rpcResultTypes = map[string][]interface{}{
"getNetTotals": {(*rpcmodel.GetNetTotalsResult)(nil)},
"getPeerInfo": {(*[]rpcmodel.GetPeerInfoResult)(nil)},
"getRawMempool": {(*[]string)(nil), (*rpcmodel.GetRawMempoolVerboseResult)(nil)},
"getRawTransaction": {(*string)(nil), (*rpcmodel.TxRawResult)(nil)},
"getSubnetwork": {(*rpcmodel.GetSubnetworkResult)(nil)},
"getTxOut": {(*rpcmodel.GetTxOutResult)(nil)},
"node": nil,
"help": {(*string)(nil), (*string)(nil)},
"ping": nil,
"removeManualNode": nil,
"searchRawTransactions": {(*string)(nil), (*[]rpcmodel.SearchRawTransactionsResult)(nil)},
"sendRawTransaction": {(*string)(nil)},
"stop": {(*string)(nil)},
"submitBlock": {nil, (*string)(nil)},

View File

@ -698,7 +698,7 @@ func (m *wsNotificationManager) notifyForNewTx(clients map[chan struct{}]*wsClie
initializeMarshalledJSONVerbose := func() bool {
net := m.server.cfg.DAGParams
build := func() ([]byte, bool) {
rawTx, err := createTxRawResult(net, mtx, txIDStr, nil, "", nil, nil, true)
rawTx, err := createTxRawResult(net, mtx, txIDStr, nil, "", nil, true)
if err != nil {
return nil, false
}