Mirror of https://github.com/kaspanet/kaspad.git (synced 2025-06-05 13:46:42 +00:00)

[NOD-339] Remove cfilters and cfindex (#527)

* [NOD-339] Remove cfilters and cfindex
* [NOD-339] Remove some leftovers

Parent: 30f0e95969
Commit: 8680231e5a
@@ -1,366 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
    "github.com/pkg/errors"

    "github.com/kaspanet/kaspad/blockdag"
    "github.com/kaspanet/kaspad/dagconfig"
    "github.com/kaspanet/kaspad/database"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/gcs"
    "github.com/kaspanet/kaspad/util/gcs/builder"
    "github.com/kaspanet/kaspad/wire"
)

const (
    // cfIndexName is the human-readable name for the index.
    cfIndexName = "committed filter index"
)

// Committed filters come in two flavours: basic and extended. They are
// generated and dropped in pairs, and both are indexed by a block's hash.
// Besides holding different content, they also live in different buckets.
var (
    // cfIndexParentBucketKey is the name of the parent bucket used to house
    // the index. The rest of the buckets live below this bucket.
    cfIndexParentBucketKey = []byte("cfindexparentbucket")

    // cfIndexKeys is an array of db bucket names used to house indexes of
    // block hashes to cfilters.
    cfIndexKeys = [][]byte{
        []byte("cf0byhashidx"),
        []byte("cf1byhashidx"),
    }

    // cfHeaderKeys is an array of db bucket names used to house indexes of
    // block hashes to cf headers.
    cfHeaderKeys = [][]byte{
        []byte("cf0headerbyhashidx"),
        []byte("cf1headerbyhashidx"),
    }

    // cfHashKeys is an array of db bucket names used to house indexes of
    // block hashes to cf hashes.
    cfHashKeys = [][]byte{
        []byte("cf0hashbyhashidx"),
        []byte("cf1hashbyhashidx"),
    }

    maxFilterType = uint8(len(cfHeaderKeys) - 1)
)

// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
// An entry's absence is not considered an error.
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) ([]byte, error) {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Get(h[:]), nil
}

// dbStoreFilterIdxEntry stores a data blob in the filter index database.
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash, f []byte) error {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Put(h[:], f)
}

// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *daghash.Hash) error {
    idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
    return idx.Delete(h[:])
}

// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
    db        database.DB
    dagParams *dagconfig.Params
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init(db database.DB, _ *blockdag.BlockDAG) error {
    idx.db = db
    return nil
}

// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
func (idx *CfIndex) Key() []byte {
    return cfIndexParentBucketKey
}

// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
    return cfIndexName
}

// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates buckets for the two hash-based cf
// indexes (basic, extended).
func (idx *CfIndex) Create(dbTx database.Tx) error {
    meta := dbTx.Metadata()

    cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
    if err != nil {
        return err
    }

    for _, bucketName := range cfIndexKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    for _, bucketName := range cfHeaderKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    for _, bucketName := range cfHashKeys {
        _, err = cfIndexParentBucket.CreateBucket(bucketName)
        if err != nil {
            return err
        }
    }

    return nil
}

// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *util.Block, f *gcs.Filter,
    filterType wire.FilterType) error {
    if uint8(filterType) > maxFilterType {
        return errors.New("unsupported filter type")
    }

    // Figure out which buckets to use.
    fkey := cfIndexKeys[filterType]
    hkey := cfHeaderKeys[filterType]
    hashkey := cfHashKeys[filterType]

    // Start by storing the filter.
    h := block.Hash()
    filterBytes, err := f.NBytes()
    if err != nil {
        return err
    }
    err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
    if err != nil {
        return err
    }

    // Next store the filter hash.
    filterHash, err := builder.GetFilterHash(f)
    if err != nil {
        return err
    }
    err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
    if err != nil {
        return err
    }

    // Then fetch the previous block's filter header.
    var prevHeader *daghash.Hash
    header := block.MsgBlock().Header
    if header.IsGenesis() {
        prevHeader = &daghash.ZeroHash
    } else {
        // TODO(Evgeny): Current implementation of GCS filter inherited from chain
        // (single parent) and must be ported to DAG (multiple parents)
        var parentHash *daghash.Hash
        if header.NumParentBlocks() != 0 {
            parentHash = header.ParentHashes[0]
        }
        prevFilterHashBytes, err := dbFetchFilterIdxEntry(dbTx, hkey, parentHash)
        if err != nil {
            return err
        }

        // Construct the new block's filter header, and store it.
        prevHeader, err = daghash.NewHash(prevFilterHashBytes)
        if err != nil {
            return err
        }
    }

    fh, err := builder.MakeHeaderForFilter(f, prevHeader)
    if err != nil {
        return err
    }
    return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *util.Block, _ uint64,
    _ *blockdag.BlockDAG, _ blockdag.MultiBlockTxsAcceptanceData, _ blockdag.MultiBlockTxsAcceptanceData) error {

    f, err := builder.BuildBasicFilter(block.MsgBlock())
    if err != nil {
        return err
    }

    err = storeFilter(dbTx, block, f, wire.GCSFilterRegular)
    if err != nil {
        return err
    }

    f, err = builder.BuildExtFilter(block.MsgBlock())
    if err != nil {
        return err
    }

    return storeFilter(dbTx, block, f, wire.GCSFilterExtended)
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *util.Block,
    _ *blockdag.BlockDAG) error {

    for _, key := range cfIndexKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    for _, key := range cfHeaderKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    for _, key := range cfHashKeys {
        err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
        if err != nil {
            return err
        }
    }

    return nil
}

// entryByBlockHash fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and block hash.
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
    filterType wire.FilterType, h *daghash.Hash) ([]byte, error) {

    if uint8(filterType) > maxFilterType {
        return nil, errors.New("unsupported filter type")
    }
    key := filterTypeKeys[filterType]

    var entry []byte
    err := idx.db.View(func(dbTx database.Tx) error {
        var err error
        entry, err = dbFetchFilterIdxEntry(dbTx, key, h)
        return err
    })
    return entry, err
}

// entriesByBlockHashes batch fetches a filter index entry of a particular type
// (e.g. filter, filter header, etc.) for a filter type and slice of block hashes.
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
    filterType wire.FilterType, blockHashes []*daghash.Hash) ([][]byte, error) {

    if uint8(filterType) > maxFilterType {
        return nil, errors.New("unsupported filter type")
    }
    key := filterTypeKeys[filterType]

    entries := make([][]byte, 0, len(blockHashes))
    err := idx.db.View(func(dbTx database.Tx) error {
        for _, blockHash := range blockHashes {
            entry, err := dbFetchFilterIdxEntry(dbTx, key, blockHash)
            if err != nil {
                return err
            }
            entries = append(entries, entry)
        }
        return nil
    })
    return entries, err
}

// FilterByBlockHash returns the serialized contents of a block's basic or
// extended committed filter.
func (idx *CfIndex) FilterByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfIndexKeys, filterType, h)
}

// FiltersByBlockHashes returns the serialized contents of a block's basic or
// extended committed filter for a set of blocks by hash.
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
}

// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// or extended committed filter header.
func (idx *CfIndex) FilterHeaderByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
}

// FilterHeadersByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter header for a set of blocks by hash.
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
}

// FilterHashByBlockHash returns the serialized contents of a block's basic
// or extended committed filter hash.
func (idx *CfIndex) FilterHashByBlockHash(h *daghash.Hash,
    filterType wire.FilterType) ([]byte, error) {
    return idx.entryByBlockHash(cfHashKeys, filterType, h)
}

// FilterHashesByBlockHashes returns the serialized contents of a block's basic
// or extended committed filter hash for a set of blocks by hash.
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*daghash.Hash,
    filterType wire.FilterType) ([][]byte, error) {
    return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
}

// Recover is invoked when the indexer wasn't turned on for several blocks
// and the indexer needs to close the gaps.
//
// This is part of the Indexer interface.
func (idx *CfIndex) Recover(dbTx database.Tx, currentBlockID, lastKnownBlockID uint64) error {
    return errors.Errorf("cfindex was turned off for %d blocks and can't be recovered."+
        " To resume working drop the cfindex with --dropcfindex", lastKnownBlockID-currentBlockID)
}

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewCfIndex(dagParams *dagconfig.Params) *CfIndex {
    return &CfIndex{dagParams: dagParams}
}

// DropCfIndex drops the CF index from the provided database if it exists.
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
    return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
}
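For context on the chaining scheme that storeFilter above relies on: each block's filter header commits to both the block's own filter hash and the parent block's filter header, so a light client that trusts one header can verify the whole chain of filters. The exact rule lives in builder.MakeHeaderForFilter, which is not part of this diff; the sketch below assumes the btcd-style construction (double-SHA256 of the filter hash concatenated with the previous header) and uses only the standard library, so treat it as illustrative rather than a verbatim copy of the removed code.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// doubleSha256 hashes the input twice, as Bitcoin-style hashes do.
func doubleSha256(b []byte) [32]byte {
	first := sha256.Sum256(b)
	return sha256.Sum256(first[:])
}

// makeFilterHeader mirrors the chaining used by storeFilter above: each
// block's header commits to its filter hash and its parent's filter header.
// Assumption: header = doubleSha256(filterHash || prevHeader), per btcd.
func makeFilterHeader(filterHash, prevHeader [32]byte) [32]byte {
	buf := make([]byte, 0, 64)
	buf = append(buf, filterHash[:]...)
	buf = append(buf, prevHeader[:]...)
	return doubleSha256(buf)
}

func main() {
	// The genesis block chains to the zero hash, as in storeFilter's
	// header.IsGenesis() branch.
	genesisPrev := [32]byte{}
	filterHash := doubleSha256([]byte("serialized filter bytes (illustrative)"))
	header := makeFilterHeader(filterHash, genesisPrev)
	fmt.Println(hex.EncodeToString(header[:]))
}
```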
btcd.go (8 lines changed)
@@ -155,14 +155,6 @@ func btcdMain(serverChan chan<- *server.Server) error {
		return nil
	}
	if cfg.DropCfIndex {
		if err := indexers.DropCfIndex(db, interrupt); err != nil {
			btcdLog.Errorf("%s", err)
			return err
		}

		return nil
	}

	// Create server and start it.
	server, err := server.NewServer(cfg.Listeners, db, config.ActiveConfig().NetParams(),
@@ -10,8 +10,6 @@ package btcjson

import (
    "encoding/json"
    "fmt"

    "github.com/kaspanet/kaspad/wire"
)

// AddManualNodeCmd defines the addManualNode JSON-RPC command.

@@ -293,37 +291,6 @@ func NewGetBlockTemplateCmd(request *TemplateRequest) *GetBlockTemplateCmd {
	}
}

// GetCFilterCmd defines the getCFilter JSON-RPC command.
type GetCFilterCmd struct {
    Hash       string
    FilterType wire.FilterType
}

// NewGetCFilterCmd returns a new instance which can be used to issue a
// getCFilter JSON-RPC command.
func NewGetCFilterCmd(hash string, filterType wire.FilterType) *GetCFilterCmd {
    return &GetCFilterCmd{
        Hash:       hash,
        FilterType: filterType,
    }
}

// GetCFilterHeaderCmd defines the getCFilterHeader JSON-RPC command.
type GetCFilterHeaderCmd struct {
    Hash       string
    FilterType wire.FilterType
}

// NewGetCFilterHeaderCmd returns a new instance which can be used to issue a
// getCFilterHeader JSON-RPC command.
func NewGetCFilterHeaderCmd(hash string,
    filterType wire.FilterType) *GetCFilterHeaderCmd {
    return &GetCFilterHeaderCmd{
        Hash:       hash,
        FilterType: filterType,
    }
}

// GetChainFromBlockCmd defines the getChainFromBlock JSON-RPC command.
type GetChainFromBlockCmd struct {
    IncludeBlocks bool `json:"includeBlocks"`

@@ -747,8 +714,6 @@ func init() {
	MustRegisterCmd("getBlockCount", (*GetBlockCountCmd)(nil), flags)
	MustRegisterCmd("getBlockHeader", (*GetBlockHeaderCmd)(nil), flags)
	MustRegisterCmd("getBlockTemplate", (*GetBlockTemplateCmd)(nil), flags)
	MustRegisterCmd("getCFilter", (*GetCFilterCmd)(nil), flags)
	MustRegisterCmd("getCFilterHeader", (*GetCFilterHeaderCmd)(nil), flags)
	MustRegisterCmd("getChainFromBlock", (*GetChainFromBlockCmd)(nil), flags)
	MustRegisterCmd("getDagTips", (*GetDAGTipsCmd)(nil), flags)
	MustRegisterCmd("getConnectionCount", (*GetConnectionCountCmd)(nil), flags)
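Against the pre-removal tree, the deleted command types plugged into btcjson's registration machinery like any other command. A minimal sketch of marshaling one of them; btcjson.MarshalCmd is assumed here to take (id, cmd) as in the btcsuite lineage this package derives from, and the expected output string is taken from the removed test below.

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/btcjson"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// Build the command exactly as the removed test below does.
	cmd := btcjson.NewGetCFilterCmd("123", wire.GCSFilterExtended)

	// Assumption: MarshalCmd(id, cmd) as in btcsuite's btcjson of this era.
	// Per the removed test this should produce:
	// {"jsonrpc":"1.0","method":"getCFilter","params":["123",1],"id":1}
	marshalled, err := btcjson.MarshalCmd(1, cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(marshalled))
}
```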
@@ -12,7 +12,6 @@ import (
    "testing"

    "github.com/kaspanet/kaspad/btcjson"
    "github.com/kaspanet/kaspad/wire"
)

// TestDAGSvrCmds tests all of the dag server commands marshal and unmarshal

@@ -324,38 +323,6 @@ func TestDAGSvrCmds(t *testing.T) {
			},
		},
	},
	{
		name: "getCFilter",
		newCmd: func() (interface{}, error) {
			return btcjson.NewCmd("getCFilter", "123",
				wire.GCSFilterExtended)
		},
		staticCmd: func() interface{} {
			return btcjson.NewGetCFilterCmd("123",
				wire.GCSFilterExtended)
		},
		marshalled: `{"jsonrpc":"1.0","method":"getCFilter","params":["123",1],"id":1}`,
		unmarshalled: &btcjson.GetCFilterCmd{
			Hash:       "123",
			FilterType: wire.GCSFilterExtended,
		},
	},
	{
		name: "getCFilterHeader",
		newCmd: func() (interface{}, error) {
			return btcjson.NewCmd("getCFilterHeader", "123",
				wire.GCSFilterExtended)
		},
		staticCmd: func() interface{} {
			return btcjson.NewGetCFilterHeaderCmd("123",
				wire.GCSFilterExtended)
		},
		marshalled: `{"jsonrpc":"1.0","method":"getCFilterHeader","params":["123",1],"id":1}`,
		unmarshalled: &btcjson.GetCFilterHeaderCmd{
			Hash:       "123",
			FilterType: wire.GCSFilterExtended,
		},
	},
	{
		name: "getChainFromBlock",
		newCmd: func() (interface{}, error) {
@@ -59,7 +59,6 @@
	ErrRPCOutOfRange         RPCErrorCode = -1
	ErrRPCNoTxInfo           RPCErrorCode = -5
	ErrRPCNoAcceptanceIndex  RPCErrorCode = -5
	ErrRPCNoCFIndex          RPCErrorCode = -5
	ErrRPCNoNewestBlockInfo  RPCErrorCode = -5
	ErrRPCInvalidTxVout      RPCErrorCode = -5
	ErrRPCSubnetworkNotFound RPCErrorCode = -5
@@ -146,8 +146,6 @@ type Flags struct {
	BlockMaxMass       uint64   `long:"blockmaxmass" description:"Maximum transaction mass to be used when creating a block"`
	UserAgentComments  []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
	NoPeerBloomFilters bool     `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
	EnableCFilters     bool     `long:"enablecfilters" description:"Enable committed filtering (CF) support"`
	DropCfIndex        bool     `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."`
	SigCacheMaxSize    uint     `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
	BlocksOnly         bool     `long:"blocksonly" description:"Do not accept transactions from remote peers."`
	TxIndex            bool     `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
doc.go (1 line changed)
@@ -112,7 +112,6 @@ Application Options:
      --blockprioritysize= Size in bytes for high-priority/low-fee transactions
                           when creating a block (50000)
      --nopeerbloomfilters Disable bloom filtering support.
      --nocfilters         Disable committed filtering (CF) support.
      --sigcachemaxsize=   The maximum number of entries in the signature
                           verification cache.
      --blocksonly         Do not accept transactions from remote peers.
peer/peer.go (48 lines changed)
@@ -117,17 +117,6 @@ type MessageListeners struct {
	// OnBlock is invoked when a peer receives a block bitcoin message.
	OnBlock func(p *Peer, msg *wire.MsgBlock, buf []byte)

	// OnCFilter is invoked when a peer receives a cfilter bitcoin message.
	OnCFilter func(p *Peer, msg *wire.MsgCFilter)

	// OnCFHeaders is invoked when a peer receives a cfheaders bitcoin
	// message.
	OnCFHeaders func(p *Peer, msg *wire.MsgCFHeaders)

	// OnCFCheckpt is invoked when a peer receives a cfcheckpt bitcoin
	// message.
	OnCFCheckpt func(p *Peer, msg *wire.MsgCFCheckpt)

	// OnInv is invoked when a peer receives an inv bitcoin message.
	OnInv func(p *Peer, msg *wire.MsgInv)

@@ -155,18 +144,6 @@ type MessageListeners struct {
	// message.
	OnGetHeaders func(p *Peer, msg *wire.MsgGetHeaders)

	// OnGetCFilters is invoked when a peer receives a getcfilters bitcoin
	// message.
	OnGetCFilters func(p *Peer, msg *wire.MsgGetCFilters)

	// OnGetCFHeaders is invoked when a peer receives a getcfheaders
	// bitcoin message.
	OnGetCFHeaders func(p *Peer, msg *wire.MsgGetCFHeaders)

	// OnGetCFCheckpt is invoked when a peer receives a getcfcheckpt
	// bitcoin message.
	OnGetCFCheckpt func(p *Peer, msg *wire.MsgGetCFCheckpt)

	// OnFeeFilter is invoked when a peer receives a feefilter bitcoin message.
	OnFeeFilter func(p *Peer, msg *wire.MsgFeeFilter)

@@ -1517,31 +1494,6 @@ out:
			p.cfg.Listeners.OnGetHeaders(p, msg)
		}

	case *wire.MsgGetCFilters:
		if p.cfg.Listeners.OnGetCFilters != nil {
			p.cfg.Listeners.OnGetCFilters(p, msg)
		}

	case *wire.MsgGetCFHeaders:
		if p.cfg.Listeners.OnGetCFHeaders != nil {
			p.cfg.Listeners.OnGetCFHeaders(p, msg)
		}

	case *wire.MsgGetCFCheckpt:
		if p.cfg.Listeners.OnGetCFCheckpt != nil {
			p.cfg.Listeners.OnGetCFCheckpt(p, msg)
		}

	case *wire.MsgCFilter:
		if p.cfg.Listeners.OnCFilter != nil {
			p.cfg.Listeners.OnCFilter(p, msg)
		}

	case *wire.MsgCFHeaders:
		if p.cfg.Listeners.OnCFHeaders != nil {
			p.cfg.Listeners.OnCFHeaders(p, msg)
		}

	case *wire.MsgFeeFilter:
		if p.cfg.Listeners.OnFeeFilter != nil {
			p.cfg.Listeners.OnFeeFilter(p, msg)

@@ -375,21 +375,6 @@ func TestPeerListeners(t *testing.T) {
		OnGetHeaders: func(p *peer.Peer, msg *wire.MsgGetHeaders) {
			ok <- msg
		},
		OnGetCFilters: func(p *peer.Peer, msg *wire.MsgGetCFilters) {
			ok <- msg
		},
		OnGetCFHeaders: func(p *peer.Peer, msg *wire.MsgGetCFHeaders) {
			ok <- msg
		},
		OnGetCFCheckpt: func(p *peer.Peer, msg *wire.MsgGetCFCheckpt) {
			ok <- msg
		},
		OnCFilter: func(p *peer.Peer, msg *wire.MsgCFilter) {
			ok <- msg
		},
		OnCFHeaders: func(p *peer.Peer, msg *wire.MsgCFHeaders) {
			ok <- msg
		},
		OnFeeFilter: func(p *peer.Peer, msg *wire.MsgFeeFilter) {
			ok <- msg
		},

@@ -506,27 +491,6 @@ func TestPeerListeners(t *testing.T) {
		{
			"OnGetHeaders",
			wire.NewMsgGetHeaders(&daghash.Hash{}, &daghash.Hash{}),
		},
		{
			"OnGetCFilters",
			wire.NewMsgGetCFilters(wire.GCSFilterRegular, 0, &daghash.Hash{}),
		},
		{
			"OnGetCFHeaders",
			wire.NewMsgGetCFHeaders(wire.GCSFilterRegular, 0, &daghash.Hash{}),
		},
		{
			"OnGetCFCheckpt",
			wire.NewMsgGetCFCheckpt(wire.GCSFilterRegular, &daghash.Hash{}),
		},
		{
			"OnCFilter",
			wire.NewMsgCFilter(wire.GCSFilterRegular, &daghash.Hash{},
				[]byte("payload")),
		},
		{
			"OnCFHeaders",
			wire.NewMsgCFHeaders(),
		},
		{
			"OnFeeFilter",
			wire.NewMsgFeeFilter(15000),
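Before this commit, a consumer subscribed to these messages simply by populating the corresponding MessageListeners fields. A minimal sketch against the pre-removal tree; the Config/Listeners shape follows btcd's peer package, which this code derives from, and the callback body is illustrative.

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/peer"
	"github.com/kaspanet/kaspad/wire"
)

// newCFilterAwareConfig wires the OnCFilter listener removed above into a
// peer.Config; the dispatch loop shown in this diff would then invoke it for
// every *wire.MsgCFilter received from the remote peer.
func newCFilterAwareConfig() *peer.Config {
	return &peer.Config{
		Listeners: peer.MessageListeners{
			OnCFilter: func(p *peer.Peer, msg *wire.MsgCFilter) {
				// msg.Data carries the serialized filter bytes, per the
				// rpcclient code elsewhere in this diff.
				fmt.Printf("cfilter: %d bytes\n", len(msg.Data))
			},
		},
	}
}

func main() {
	cfg := newCFilterAwareConfig()
	fmt.Println(cfg.Listeners.OnCFilter != nil) // true
}
```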
rpcclient/dag.go (109 lines changed)
@@ -788,112 +788,3 @@ func (c *Client) InvalidateBlockAsync(blockHash *daghash.Hash) FutureInvalidateB
func (c *Client) InvalidateBlock(blockHash *daghash.Hash) error {
	return c.InvalidateBlockAsync(blockHash).Receive()
}

// FutureGetCFilterResult is a future promise to deliver the result of a
// GetCFilterAsync RPC invocation (or an applicable error).
type FutureGetCFilterResult chan *response

// Receive waits for the response promised by the future and returns the raw
// filter requested from the server given its block hash.
func (r FutureGetCFilterResult) Receive() (*wire.MsgCFilter, error) {
    res, err := receiveFuture(r)
    if err != nil {
        return nil, err
    }

    // Unmarshal result as a string.
    var filterHex string
    err = json.Unmarshal(res, &filterHex)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    // Decode the serialized cf hex to raw bytes.
    serializedFilter, err := hex.DecodeString(filterHex)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    // Assign the filter bytes to the correct field of the wire message.
    // We aren't going to set the block hash or extended flag, since we
    // don't actually get that back in the RPC response.
    var msgCFilter wire.MsgCFilter
    msgCFilter.Data = serializedFilter
    return &msgCFilter, nil
}

// GetCFilterAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See GetCFilter for the blocking version and more details.
func (c *Client) GetCFilterAsync(blockHash *daghash.Hash,
    filterType wire.FilterType) FutureGetCFilterResult {
    hash := ""
    if blockHash != nil {
        hash = blockHash.String()
    }

    cmd := btcjson.NewGetCFilterCmd(hash, filterType)
    return c.sendCmd(cmd)
}

// GetCFilter returns a raw filter from the server given its block hash.
func (c *Client) GetCFilter(blockHash *daghash.Hash,
    filterType wire.FilterType) (*wire.MsgCFilter, error) {
    return c.GetCFilterAsync(blockHash, filterType).Receive()
}

// FutureGetCFilterHeaderResult is a future promise to deliver the result of a
// GetCFilterHeaderAsync RPC invocation (or an applicable error).
type FutureGetCFilterHeaderResult chan *response

// Receive waits for the response promised by the future and returns the raw
// filter header requested from the server given its block hash.
func (r FutureGetCFilterHeaderResult) Receive() (*wire.MsgCFHeaders, error) {
    res, err := receiveFuture(r)
    if err != nil {
        return nil, err
    }

    // Unmarshal result as a string.
    var headerHex string
    err = json.Unmarshal(res, &headerHex)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    // Assign the decoded header into a hash.
    headerHash, err := daghash.NewHashFromStr(headerHex)
    if err != nil {
        return nil, err
    }

    // Assign the hash to a headers message and return it.
    msgCFHeaders := wire.MsgCFHeaders{PrevFilterHeader: headerHash}
    return &msgCFHeaders, nil
}

// GetCFilterHeaderAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function
// on the returned instance.
//
// See GetCFilterHeader for the blocking version and more details.
func (c *Client) GetCFilterHeaderAsync(blockHash *daghash.Hash,
    filterType wire.FilterType) FutureGetCFilterHeaderResult {
    hash := ""
    if blockHash != nil {
        hash = blockHash.String()
    }

    cmd := btcjson.NewGetCFilterHeaderCmd(hash, filterType)
    return c.sendCmd(cmd)
}

// GetCFilterHeader returns a raw filter header from the server given its block
// hash.
func (c *Client) GetCFilterHeader(blockHash *daghash.Hash,
    filterType wire.FilterType) (*wire.MsgCFHeaders, error) {
    return c.GetCFilterHeaderAsync(blockHash, filterType).Receive()
}
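Before the removal, fetching a filter over RPC was a one-liner on the client. A hedged sketch against the pre-removal tree: the rpcclient.New signature and ConnConfig fields (Host, User, Pass, HTTPPostMode, DisableTLS) follow the btcd-style client this package derives from, and the host, credentials, and block hash are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/rpcclient"
	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// Hypothetical connection details; adjust for a real node.
	client, err := rpcclient.New(&rpcclient.ConnConfig{
		Host:         "localhost:16110",
		User:         "user",
		Pass:         "pass",
		HTTPPostMode: true,
		DisableTLS:   true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Shutdown()

	// Hypothetical block hash, purely for illustration.
	blockHash, err := daghash.NewHashFromStr(
		"000000000000000000000000000000000000000000000000000000000000beef")
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the block's regular committed filter. Only the Data field is
	// populated by Receive, as the comment in the removed code notes.
	filter, err := client.GetCFilter(blockHash, wire.GCSFilterRegular)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("filter is %d bytes\n", len(filter.Data))
}
```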
@@ -167,9 +167,6 @@
; Must not include characters '/', ':', '(' and ')'.
; uacomment=

; Enable committed peer filtering (CF).
; enablecfilters=1

; ------------------------------------------------------------------------------
; RPC server options - The following options control the built-in RPC server
; which is used to control and query information from a running btcd process.
@@ -1,37 +0,0 @@
package p2p

import (
    "github.com/kaspanet/kaspad/peer"
    "github.com/kaspanet/kaspad/wire"
)

// OnGetCFilters is invoked when a peer receives a getcfilters bitcoin message.
func (sp *Peer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) {
    // Ignore getcfilters requests if not in sync.
    if !sp.server.SyncManager.IsCurrent() {
        return
    }

    hashes, err := sp.server.DAG.ChainHeightToHashRange(msg.StartHeight,
        msg.StopHash, wire.MaxGetCFiltersReqRange)
    if err != nil {
        peerLog.Debugf("Invalid getcfilters request: %s", err)
        return
    }

    filters, err := sp.server.CfIndex.FiltersByBlockHashes(hashes,
        msg.FilterType)
    if err != nil {
        peerLog.Errorf("Error retrieving cfilters: %s", err)
        return
    }

    for i, filterBytes := range filters {
        if len(filterBytes) == 0 {
            peerLog.Warnf("Could not obtain cfilter for %s", hashes[i])
            return
        }
        filterMsg := wire.NewMsgCFilter(msg.FilterType, hashes[i], filterBytes)
        sp.QueueMessage(filterMsg, nil)
    }
}
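The handler above answers wire.MsgGetCFilters requests. A requesting peer would construct one exactly as the removed peer test earlier in this diff does, via wire.NewMsgGetCFilters(filterType, startHeight, stopHash); a minimal sketch (the zero stop hash is purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// Request regular filters from chain height 0 up to stopHash; the
	// handler caps the range at wire.MaxGetCFiltersReqRange.
	stopHash := &daghash.Hash{}
	msg := wire.NewMsgGetCFilters(wire.GCSFilterRegular, 0, stopHash)
	fmt.Println(msg.Command(), msg.StartHeight, msg.StopHash)
}
```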
@@ -1,106 +0,0 @@
package p2p

import (
    "github.com/kaspanet/kaspad/peer"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/wire"
)

// OnGetCFCheckpt is invoked when a peer receives a getcfcheckpt bitcoin message.
func (sp *Peer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) {
    // Ignore getcfcheckpt requests if not in sync.
    if !sp.server.SyncManager.IsCurrent() {
        return
    }

    blockHashes, err := sp.server.DAG.IntervalBlockHashes(msg.StopHash,
        wire.CFCheckptInterval)
    if err != nil {
        peerLog.Debugf("Invalid getcfcheckpt request: %s", err)
        return
    }

    var updateCache bool
    var checkptCache []cfHeaderKV

    if len(blockHashes) > len(checkptCache) {
        // Update the cache if the checkpoint chain is longer than the cached
        // one. This ensures that the cache is relatively stable and mostly
        // overlaps with the best chain, since it follows the longest chain
        // heuristic.
        updateCache = true

        // Take write lock because we are going to update cache.
        sp.server.cfCheckptCachesMtx.Lock()
        defer sp.server.cfCheckptCachesMtx.Unlock()

        // Grow the checkptCache to be the length of blockHashes.
        additionalLength := len(blockHashes) - len(checkptCache)
        checkptCache = append(sp.server.cfCheckptCaches[msg.FilterType],
            make([]cfHeaderKV, additionalLength)...)
    } else {
        updateCache = false

        // Take reader lock because we are not going to update cache.
        sp.server.cfCheckptCachesMtx.RLock()
        defer sp.server.cfCheckptCachesMtx.RUnlock()

        checkptCache = sp.server.cfCheckptCaches[msg.FilterType]
    }

    // Iterate backwards until the block hash is found in the cache.
    var forkIdx int
    for forkIdx = len(checkptCache); forkIdx > 0; forkIdx-- {
        if checkptCache[forkIdx-1].blockHash.IsEqual(blockHashes[forkIdx-1]) {
            break
        }
    }

    // Populate results with cached checkpoints.
    checkptMsg := wire.NewMsgCFCheckpt(msg.FilterType, msg.StopHash,
        len(blockHashes))
    for i := 0; i < forkIdx; i++ {
        checkptMsg.AddCFHeader(checkptCache[i].filterHeader)
    }

    // Look up any filter headers that aren't cached.
    blockHashPtrs := make([]*daghash.Hash, 0, len(blockHashes)-forkIdx)
    for i := forkIdx; i < len(blockHashes); i++ {
        blockHashPtrs = append(blockHashPtrs, blockHashes[i])
    }

    filterHeaders, err := sp.server.CfIndex.FilterHeadersByBlockHashes(blockHashPtrs,
        msg.FilterType)
    if err != nil {
        peerLog.Errorf("Error retrieving cfilter headers: %s", err)
        return
    }

    for i, filterHeaderBytes := range filterHeaders {
        if len(filterHeaderBytes) == 0 {
            peerLog.Warnf("Could not obtain CF header for %s", blockHashPtrs[i])
            return
        }

        filterHeader, err := daghash.NewHash(filterHeaderBytes)
        if err != nil {
            peerLog.Warnf("Committed filter header deserialize "+
                "failed: %s", err)
            return
        }

        checkptMsg.AddCFHeader(filterHeader)
        if updateCache {
            checkptCache[forkIdx+i] = cfHeaderKV{
                blockHash:    blockHashes[forkIdx+i],
                filterHeader: filterHeader,
            }
        }
    }

    if updateCache {
        sp.server.cfCheckptCaches[msg.FilterType] = checkptCache
    }

    sp.QueueMessage(checkptMsg, nil)
}
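The cache handling above only recomputes the suffix of checkpoints that the cached slice does not already cover. A small, self-contained sketch of that backwards fork-scan, with simplified stand-ins for cfHeaderKV and daghash.Hash; the length clamp is added here for safety, whereas in the handler the cache is pre-grown so the indexes always line up.

```go
package main

import "fmt"

type hash [32]byte

// headerKV is a simplified stand-in for the cfHeaderKV cache entry above.
type headerKV struct {
	blockHash    hash
	filterHeader hash
}

// matchingPrefix returns the number of leading cache entries whose block
// hashes agree with the requested checkpoints, mirroring the forkIdx loop.
func matchingPrefix(cache []headerKV, requested []hash) int {
	forkIdx := len(cache)
	if forkIdx > len(requested) {
		forkIdx = len(requested)
	}
	for ; forkIdx > 0; forkIdx-- {
		if cache[forkIdx-1].blockHash == requested[forkIdx-1] {
			break
		}
	}
	return forkIdx
}

func main() {
	a, b, c := hash{1}, hash{2}, hash{3}
	cache := []headerKV{{blockHash: a}, {blockHash: b}}
	requested := []hash{a, b, c}
	// Prints 2: only the checkpoint for c needs a database lookup.
	fmt.Println(matchingPrefix(cache, requested))
}
```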
@@ -1,102 +0,0 @@
package p2p

import (
    "github.com/kaspanet/kaspad/peer"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/wire"
)

// OnGetCFHeaders is invoked when a peer receives a getcfheaders bitcoin message.
func (sp *Peer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
    // Ignore getcfheaders requests if not in sync.
    if !sp.server.SyncManager.IsCurrent() {
        return
    }

    startHeight := msg.StartHeight
    maxResults := wire.MaxCFHeadersPerMsg

    // If StartHeight is positive, fetch the predecessor block hash so we can
    // populate the PrevFilterHeader field.
    if msg.StartHeight > 0 {
        startHeight--
        maxResults++
    }

    // Fetch the hashes from the block index.
    hashList, err := sp.server.DAG.ChainHeightToHashRange(startHeight,
        msg.StopHash, maxResults)
    if err != nil {
        peerLog.Debugf("Invalid getcfheaders request: %s", err)
    }

    // This is possible if StartHeight is one greater than the height of
    // StopHash, and we pull a valid range of hashes including the previous
    // filter header.
    if len(hashList) == 0 || (msg.StartHeight > 0 && len(hashList) == 1) {
        peerLog.Debug("No results for getcfheaders request")
        return
    }

    // Fetch the raw filter hash bytes from the database for all blocks.
    filterHashes, err := sp.server.CfIndex.FilterHashesByBlockHashes(hashList,
        msg.FilterType)
    if err != nil {
        peerLog.Errorf("Error retrieving cfilter hashes: %s", err)
        return
    }

    // Generate cfheaders message and send it.
    headersMsg := wire.NewMsgCFHeaders()

    // Populate the PrevFilterHeader field.
    if msg.StartHeight > 0 {
        parentHash := hashList[0]

        // Fetch the raw committed filter header bytes from the
        // database.
        headerBytes, err := sp.server.CfIndex.FilterHeaderByBlockHash(
            parentHash, msg.FilterType)
        if err != nil {
            peerLog.Errorf("Error retrieving CF header: %s", err)
            return
        }
        if len(headerBytes) == 0 {
            peerLog.Warnf("Could not obtain CF header for %s", parentHash)
            return
        }

        // Deserialize the hash into PrevFilterHeader.
        err = headersMsg.PrevFilterHeader.SetBytes(headerBytes)
        if err != nil {
            peerLog.Warnf("Committed filter header deserialize "+
                "failed: %s", err)
            return
        }

        hashList = hashList[1:]
        filterHashes = filterHashes[1:]
    }

    // Populate HeaderHashes.
    for i, hashBytes := range filterHashes {
        if len(hashBytes) == 0 {
            peerLog.Warnf("Could not obtain CF hash for %s", hashList[i])
            return
        }

        // Deserialize the hash.
        filterHash, err := daghash.NewHash(hashBytes)
        if err != nil {
            peerLog.Warnf("Committed filter hash deserialize "+
                "failed: %s", err)
            return
        }

        headersMsg.AddCFHash(filterHash)
    }

    headersMsg.FilterType = msg.FilterType
    headersMsg.StopHash = msg.StopHash
    sp.QueueMessage(headersMsg, nil)
}
@@ -300,12 +300,6 @@ type Server struct {
	TxIndex         *indexers.TxIndex
	AddrIndex       *indexers.AddrIndex
	AcceptanceIndex *indexers.AcceptanceIndex
	CfIndex         *indexers.CfIndex

	// cfCheckptCaches stores a cached slice of filter headers for cfcheckpt
	// messages for each filter type.
	cfCheckptCaches    map[wire.FilterType][]cfHeaderKV
	cfCheckptCachesMtx sync.RWMutex

	notifyNewTransactions func(txns []*mempool.TxDesc)
	isRPCServerActive     bool

@@ -1071,9 +1065,6 @@ func newPeerConfig(sp *Peer) *peer.Config {
		OnBlockLocator: sp.OnBlockLocator,
		OnGetBlockInvs: sp.OnGetBlockInvs,
		OnGetHeaders:   sp.OnGetHeaders,
		OnGetCFilters:  sp.OnGetCFilters,
		OnGetCFHeaders: sp.OnGetCFHeaders,
		OnGetCFCheckpt: sp.OnGetCFCheckpt,
		OnFeeFilter:    sp.OnFeeFilter,
		OnFilterAdd:    sp.OnFilterAdd,
		OnFilterClear:  sp.OnFilterClear,

@@ -1596,9 +1587,6 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
	if config.ActiveConfig().NoPeerBloomFilters {
		services &^= wire.SFNodeBloom
	}
	if !config.ActiveConfig().EnableCFilters {
		services &^= wire.SFNodeCF
	}

	amgr := addrmgr.New(config.ActiveConfig().DataDir, serverutils.BTCDLookup, config.ActiveConfig().SubnetworkID)

@@ -1634,7 +1622,6 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
		TimeSource:            blockdag.NewMedianTime(),
		services:              services,
		SigCache:              txscript.NewSigCache(config.ActiveConfig().SigCacheMaxSize),
		cfCheckptCaches:       make(map[wire.FilterType][]cfHeaderKV),
		notifyNewTransactions: notifyNewTransactions,
	}

@@ -1669,11 +1656,6 @@ func NewServer(listenAddrs []string, db database.DB, dagParams *dagconfig.Params
		s.AcceptanceIndex = indexers.NewAcceptanceIndex()
		indexes = append(indexes, s.AcceptanceIndex)
	}
	if config.ActiveConfig().EnableCFilters {
		indxLog.Info("cf index is enabled")
		s.CfIndex = indexers.NewCfIndex(dagParams)
		indexes = append(indexes, s.CfIndex)
	}

	// Create an index manager if any of the optional indexes are enabled.
	var indexManager blockdag.IndexManager
@@ -1,36 +0,0 @@
package rpc

import (
    "encoding/hex"
    "github.com/kaspanet/kaspad/btcjson"
    "github.com/kaspanet/kaspad/util/daghash"
)

// handleGetCFilter implements the getCFilter command.
func handleGetCFilter(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
    if s.cfg.CfIndex == nil {
        return nil, &btcjson.RPCError{
            Code:    btcjson.ErrRPCNoCFIndex,
            Message: "The CF index must be enabled for this command",
        }
    }

    c := cmd.(*btcjson.GetCFilterCmd)
    hash, err := daghash.NewHashFromStr(c.Hash)
    if err != nil {
        return nil, rpcDecodeHexError(c.Hash)
    }

    filterBytes, err := s.cfg.CfIndex.FilterByBlockHash(hash, c.FilterType)
    if err != nil {
        log.Debugf("Could not find committed filter for %s: %s",
            hash, err)
        return nil, &btcjson.RPCError{
            Code:    btcjson.ErrRPCBlockNotFound,
            Message: "Block not found",
        }
    }

    log.Debugf("Found committed filter for %s", hash)
    return hex.EncodeToString(filterBytes), nil
}
@@ -1,37 +0,0 @@
package rpc

import (
    "github.com/kaspanet/kaspad/btcjson"
    "github.com/kaspanet/kaspad/util/daghash"
)

// handleGetCFilterHeader implements the getCFilterHeader command.
func handleGetCFilterHeader(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
    if s.cfg.CfIndex == nil {
        return nil, &btcjson.RPCError{
            Code:    btcjson.ErrRPCNoCFIndex,
            Message: "The CF index must be enabled for this command",
        }
    }

    c := cmd.(*btcjson.GetCFilterHeaderCmd)
    hash, err := daghash.NewHashFromStr(c.Hash)
    if err != nil {
        return nil, rpcDecodeHexError(c.Hash)
    }

    headerBytes, err := s.cfg.CfIndex.FilterHeaderByBlockHash(hash, c.FilterType)
    if len(headerBytes) > 0 {
        log.Debugf("Found header of committed filter for %s", hash)
    } else {
        log.Debugf("Could not find header of committed filter for %s: %s",
            hash, err)
        return nil, &btcjson.RPCError{
            Code:    btcjson.ErrRPCBlockNotFound,
            Message: "Block not found",
        }
    }

    hash.SetBytes(headerBytes)
    return hash.String(), nil
}
@@ -74,8 +74,6 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
	"getBlockCount":      handleGetBlockCount,
	"getBlockHeader":     handleGetBlockHeader,
	"getBlockTemplate":   handleGetBlockTemplate,
	"getCFilter":         handleGetCFilter,
	"getCFilterHeader":   handleGetCFilterHeader,
	"getChainFromBlock":  handleGetChainFromBlock,
	"getConnectionCount": handleGetConnectionCount,
	"getCurrentNet":      handleGetCurrentNet,

@@ -147,8 +145,6 @@ var rpcLimited = map[string]struct{}{
	"getBlockCount":     {},
	"getBlockHash":      {},
	"getBlockHeader":    {},
	"getCFilter":        {},
	"getCFilterHeader":  {},
	"getChainFromBlock": {},
	"getCurrentNet":     {},
	"getDifficulty":     {},

@@ -810,7 +806,6 @@ type rpcserverConfig struct {
	TxIndex         *indexers.TxIndex
	AddrIndex       *indexers.AddrIndex
	AcceptanceIndex *indexers.AcceptanceIndex
	CfIndex         *indexers.CfIndex
}

// setupRPCListeners returns a slice of listeners that are configured for use

@@ -894,7 +889,6 @@ func NewRPCServer(
		TxIndex:         p2pServer.TxIndex,
		AddrIndex:       p2pServer.AddrIndex,
		AcceptanceIndex: p2pServer.AcceptanceIndex,
		CfIndex:         p2pServer.CfIndex,
		DAG:             p2pServer.DAG,
	}
	rpc := Server{
@@ -360,12 +360,6 @@ var helpDescsEnUS = map[string]string{
	"getBlockTemplate--condition2": "mode=proposal, accepted",
	"getBlockTemplate--result1":    "An error string which represents why the proposal was rejected or nothing if accepted",

	// GetCFilterCmd help.
	"getCFilter--synopsis":  "Returns a block's committed filter given its hash.",
	"getCFilter-filterType": "The type of filter to return (0=regular, 1=extended)",
	"getCFilter-hash":       "The hash of the block",
	"getCFilter--result0":   "The block's committed filter",

	// GetChainFromBlockCmd help.
	"getChainFromBlock--synopsis": "Return the selected parent chain starting from startHash up to the virtual. If startHash is not in the selected parent chain, it goes down the DAG until it does reach a hash in the selected parent chain while collecting hashes into removedChainBlockHashes.",
	"getChainFromBlock-startHash": "Hash of the bottom of the requested chain. If this hash is unknown or is not a chain block - returns an error.",

@@ -377,12 +371,6 @@ var helpDescsEnUS = map[string]string{
	"getChainFromBlockResult-addedChainBlocks": "List of ChainBlocks from Virtual.SelectedTip to StartHash (excluding StartHash) ordered bottom-to-top.",
	"getChainFromBlockResult-blocks":           "If includeBlocks=true - contains the contents of all chain and accepted blocks in the AddedChainBlocks. Otherwise - omitted.",

	// GetCFilterHeaderCmd help.
	"getCFilterHeader--synopsis":  "Returns a block's compact filter header given its hash.",
	"getCFilterHeader-filterType": "The type of filter header to return (0=regular, 1=extended)",
	"getCFilterHeader-hash":       "The hash of the block",
	"getCFilterHeader--result0":   "The block's gcs filter header",

	// GetConnectionCountCmd help.
	"getConnectionCount--synopsis": "Returns the number of active connections to other peers.",
	"getConnectionCount--result0":  "The number of connections",

@@ -688,8 +676,6 @@ var rpcResultTypes = map[string][]interface{}{
	"getBlockHeader":     {(*string)(nil), (*btcjson.GetBlockHeaderVerboseResult)(nil)},
	"getBlockTemplate":   {(*btcjson.GetBlockTemplateResult)(nil), (*string)(nil), nil},
	"getBlockDagInfo":    {(*btcjson.GetBlockDAGInfoResult)(nil)},
	"getCFilter":         {(*string)(nil)},
	"getCFilterHeader":   {(*string)(nil)},
	"getChainFromBlock":  {(*btcjson.GetChainFromBlockResult)(nil)},
	"getConnectionCount": {(*int32)(nil)},
	"getCurrentNet":      {(*uint32)(nil)},
@@ -1,24 +0,0 @@
gcs
==========

[Build Status](https://travis-ci.org/btcsuite/btcutil)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[GoDoc](http://godoc.org/github.com/daglabs/btcutil/gcs)

Package gcs provides an API for building and using a Golomb-coded set filter
similar to that described [here](http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters).

A comprehensive suite of tests is provided to ensure proper functionality.

## Installation and Updating

```bash
$ go get -u github.com/daglabs/btcutil/gcs
```

## License

Package gcs is licensed under the [copyfree](http://copyfree.org) ISC
License.
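As a usage sketch of the package being removed: the fluent builder API in the file below constructs a filter, and the resulting gcs.Filter can then be queried. The import path follows the builder file's own imports rather than the README's daglabs path, and filter.Match is assumed to have the btcutil-style (key, data) signature.

```go
package main

import (
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/util/gcs/builder"
)

func main() {
	// Build a filter over two entries with the default 2^-20
	// false-positive rate, using the fluent API from the builder below.
	b := builder.WithRandomKey()
	b.AddEntry([]byte("alpha")).AddEntry([]byte("beta"))

	filter, err := b.Build()
	if err != nil {
		log.Fatal(err)
	}

	// Querying requires the same key the filter was built with.
	key, err := b.Key()
	if err != nil {
		log.Fatal(err)
	}

	// Match reports whether an entry is probably in the set; assumed
	// signature Match(key, data) as in btcutil's gcs package.
	hit, err := filter.Match(key, []byte("alpha"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hit) // true, up to the 2^-20 false-positive rate
}
```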
@ -1,410 +0,0 @@
|
||||
// Copyright (c) 2017 The btcsuite developers
|
||||
// Copyright (c) 2017 The Lightning Network Developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package builder
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/kaspanet/kaspad/txscript"
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
"github.com/kaspanet/kaspad/util/gcs"
|
||||
"github.com/kaspanet/kaspad/wire"
|
||||
)
|
||||
|
||||
// DefaultP is the default collision probability (2^-20)
|
||||
const DefaultP = 20
|
||||
|
||||
// GCSBuilder is a utility class that makes building GCS filters convenient.
|
||||
type GCSBuilder struct {
|
||||
p uint8
|
||||
key [gcs.KeySize]byte
|
||||
|
||||
// data is a set of entries represented as strings. This is done to
|
||||
// deduplicate items as they are added.
|
||||
data map[string]struct{}
|
||||
err error
|
||||
}
|
||||
|
||||
// RandomKey is a utility function that returns a cryptographically random
|
||||
// [gcs.KeySize]byte usable as a key for a GCS filter.
|
||||
func RandomKey() ([gcs.KeySize]byte, error) {
|
||||
var key [gcs.KeySize]byte
|
||||
|
||||
// Read a byte slice from rand.Reader.
|
||||
randKey := make([]byte, gcs.KeySize)
|
||||
_, err := rand.Read(randKey)
|
||||
|
||||
// This shouldn't happen unless the user is on a system that doesn't
|
||||
// have a system CSPRNG. OK to panic in this case.
|
||||
if err != nil {
|
||||
return key, err
|
||||
}
|
||||
|
||||
// Copy the byte slice to a [gcs.KeySize]byte array and return it.
|
||||
copy(key[:], randKey[:])
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// DeriveKey is a utility function that derives a key from a daghash.Hash by
|
||||
// truncating the bytes of the hash to the appopriate key size.
|
||||
func DeriveKey(keyHash *daghash.Hash) [gcs.KeySize]byte {
|
||||
var key [gcs.KeySize]byte
|
||||
copy(key[:], keyHash.CloneBytes()[:])
|
||||
return key
|
||||
}
|
||||
|
||||
// OutpointToFilterEntry is a utility function that derives a filter entry from
|
||||
// a wire.Outpoint in a standardized way for use with both building and
|
||||
// querying filters.
|
||||
func OutpointToFilterEntry(outpoint wire.Outpoint) []byte {
|
||||
// Size of the hash plus size of int32 index
|
||||
data := make([]byte, daghash.HashSize+4)
|
||||
copy(data[:], outpoint.TxID.CloneBytes()[:])
|
||||
binary.LittleEndian.PutUint32(data[daghash.HashSize:], outpoint.Index)
|
||||
return data
|
||||
}
|
||||
|
||||
// Key retrieves the key with which the builder will build a filter. This is
|
||||
// useful if the builder is created with a random initial key.
|
||||
func (b *GCSBuilder) Key() ([gcs.KeySize]byte, error) {
|
||||
// Do nothing if the builder's errored out.
|
||||
if b.err != nil {
|
||||
return [gcs.KeySize]byte{}, b.err
|
||||
}
|
||||
|
||||
return b.key, nil
|
||||
}
|
||||
|
||||
// SetKey sets the key with which the builder will build a filter to the passed
|
||||
// [gcs.KeySize]byte.
|
||||
func (b *GCSBuilder) SetKey(key [gcs.KeySize]byte) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
copy(b.key[:], key[:])
|
||||
return b
|
||||
}
|
||||
|
||||
// SetKeyFromHash sets the key with which the builder will build a filter to a
|
||||
// key derived from the passed daghash.Hash using DeriveKey().
|
||||
func (b *GCSBuilder) SetKeyFromHash(keyHash *daghash.Hash) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return b.SetKey(DeriveKey(keyHash))
|
||||
}
|
||||
|
||||
// SetKeyFromTxID is wrapper of SetKeyFromHash for TxID
|
||||
func (b *GCSBuilder) SetKeyFromTxID(keyTxID *daghash.TxID) *GCSBuilder {
|
||||
return b.SetKeyFromHash((*daghash.Hash)(keyTxID))
|
||||
}
|
||||
|
||||
// SetP sets the filter's probability after calling Builder().
|
||||
func (b *GCSBuilder) SetP(p uint8) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
// Basic sanity check.
|
||||
if p > 32 {
|
||||
b.err = gcs.ErrPTooBig
|
||||
return b
|
||||
}
|
||||
|
||||
b.p = p
|
||||
return b
|
||||
}
|
||||
|
||||
// Preallocate sets the estimated filter size after calling Builder() to reduce
|
||||
// the probability of memory reallocations. If the builder has already had data
|
||||
// added to it, Preallocate has no effect.
|
||||
func (b *GCSBuilder) Preallocate(n uint32) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
if b.data == nil {
|
||||
b.data = make(map[string]struct{}, n)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// AddEntry adds a []byte to the list of entries to be included in the GCS
|
||||
// filter when it's built.
|
||||
func (b *GCSBuilder) AddEntry(data []byte) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
b.data[string(data)] = struct{}{}
|
||||
return b
|
||||
}
|
||||
|
||||
// AddEntries adds all the []byte entries in a [][]byte to the list of entries
|
||||
// to be included in the GCS filter when it's built.
|
||||
func (b *GCSBuilder) AddEntries(data [][]byte) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
for _, entry := range data {
|
||||
b.AddEntry(entry)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AddOutpoint adds a wire.Outpoint to the list of entries to be included in
|
||||
// the GCS filter when it's built.
|
||||
func (b *GCSBuilder) AddOutpoint(outpoint wire.Outpoint) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return b.AddEntry(OutpointToFilterEntry(outpoint))
|
||||
}
|
||||
|
||||
// AddHash adds a daghash.Hash to the list of entries to be included in the
|
||||
// GCS filter when it's built.
|
||||
func (b *GCSBuilder) AddHash(hash *daghash.Hash) *GCSBuilder {
|
||||
// Do nothing if the builder's already errored out.
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return b.AddEntry(hash.CloneBytes())
|
||||
}
|
||||
|
||||
// AddTxID adds a daghash.TxID to the list of entries to be included in the
|
||||
// GCS filter when it's built.
|
func (b *GCSBuilder) AddTxID(txID *daghash.TxID) *GCSBuilder {
    return b.AddHash((*daghash.Hash)(txID))
}

// AddScript adds all the data pushed in the script serialized as the passed
// []byte to the list of entries to be included in the GCS filter when it's
// built.
func (b *GCSBuilder) AddScript(script []byte) *GCSBuilder {
    // Do nothing if the builder's already errored out.
    if b.err != nil {
        return b
    }

    // Ignore errors and add pushed data, if any
    data, _ := txscript.PushedData(script)
    if len(data) == 0 {
        return b
    }

    return b.AddEntries(data)
}

// Build builds a GCS filter with the given parameters and data.
func (b *GCSBuilder) Build() (*gcs.Filter, error) {
    // Do nothing if the builder's already errored out.
    if b.err != nil {
        return nil, b.err
    }

    dataSlice := make([][]byte, 0, len(b.data))
    for item := range b.data {
        dataSlice = append(dataSlice, []byte(item))
    }

    return gcs.BuildGCSFilter(b.p, b.key, dataSlice)
}

// WithKeyPN creates a GCSBuilder with the specified key and the passed
// probability and estimated filter size.
func WithKeyPN(key [gcs.KeySize]byte, p uint8, n uint32) *GCSBuilder {
    b := GCSBuilder{}
    return b.SetKey(key).SetP(p).Preallocate(n)
}

// WithKeyP creates a GCSBuilder with the specified key and the passed
// probability. Estimated filter size is set to zero, which means more
// reallocations are done when building the filter.
func WithKeyP(key [gcs.KeySize]byte, p uint8) *GCSBuilder {
    return WithKeyPN(key, p, 0)
}

// WithKey creates a GCSBuilder with the specified key. Probability is set to
// 20 (2^-20 collision probability). Estimated filter size is set to zero, which
// means more reallocations are done when building the filter.
func WithKey(key [gcs.KeySize]byte) *GCSBuilder {
    return WithKeyPN(key, DefaultP, 0)
}

// WithKeyHashPN creates a GCSBuilder with a key derived from the specified
// daghash.Hash and the passed probability and estimated filter size.
func WithKeyHashPN(keyHash *daghash.Hash, p uint8, n uint32) *GCSBuilder {
    return WithKeyPN(DeriveKey(keyHash), p, n)
}

// WithKeyHashP creates a GCSBuilder with a key derived from the specified
// daghash.Hash and the passed probability. Estimated filter size is set to
// zero, which means more reallocations are done when building the filter.
func WithKeyHashP(keyHash *daghash.Hash, p uint8) *GCSBuilder {
    return WithKeyHashPN(keyHash, p, 0)
}

// WithKeyTxIDP is a wrapper of WithKeyHashP for TxID.
func WithKeyTxIDP(keyTxID *daghash.TxID, p uint8) *GCSBuilder {
    return WithKeyHashP((*daghash.Hash)(keyTxID), p)
}

// WithKeyHash creates a GCSBuilder with a key derived from the specified
// daghash.Hash. Probability is set to 20 (2^-20 collision probability).
// Estimated filter size is set to zero, which means more reallocations are
// done when building the filter.
func WithKeyHash(keyHash *daghash.Hash) *GCSBuilder {
    return WithKeyHashPN(keyHash, DefaultP, 0)
}

// WithKeyTxID is a wrapper of WithKeyHash for transaction IDs.
func WithKeyTxID(keyTxID *daghash.TxID) *GCSBuilder {
    return WithKeyHash((*daghash.Hash)(keyTxID))
}

// WithRandomKeyPN creates a GCSBuilder with a cryptographically random key and
// the passed probability and estimated filter size.
func WithRandomKeyPN(p uint8, n uint32) *GCSBuilder {
    key, err := RandomKey()
    if err != nil {
        b := GCSBuilder{err: err}
        return &b
    }
    return WithKeyPN(key, p, n)
}

// WithRandomKeyP creates a GCSBuilder with a cryptographically random key and
// the passed probability. Estimated filter size is set to zero, which means
// more reallocations are done when building the filter.
func WithRandomKeyP(p uint8) *GCSBuilder {
    return WithRandomKeyPN(p, 0)
}

// WithRandomKey creates a GCSBuilder with a cryptographically random key.
// Probability is set to 20 (2^-20 collision probability). Estimated filter
// size is set to zero, which means more reallocations are done when
// building the filter.
func WithRandomKey() *GCSBuilder {
    return WithRandomKeyPN(DefaultP, 0)
}

// BuildBasicFilter builds a basic GCS filter from a block. A basic GCS filter
// will contain all the previous outpoints spent within a block, as well as the
// data pushes within all the outputs created within a block.
func BuildBasicFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
    blockHash := block.BlockHash()
    b := WithKeyHash(blockHash)

    // If the filter had an issue with the specified key, then we force it
    // to bubble up here by calling the Key() function.
    _, err := b.Key()
    if err != nil {
        return nil, err
    }

    // In order to build a basic filter, we'll range over the entire block,
    // adding the outpoint data as well as the data pushes within the
    // scriptPubKey.
    for i, tx := range block.Transactions {
        // First we'll compute the hash of the transaction and add that
        // directly to the filter.
        b.AddTxID(tx.TxID())

        // Skip the inputs for the coinbase transaction.
        if i != 0 {
            // For each txin, we'll add a serialized version of
            // the txid:index to the filter's data slices.
            for _, txIn := range tx.TxIn {
                b.AddOutpoint(txIn.PreviousOutpoint)
            }
        }

        // For each output in a transaction, we'll add each of the
        // individual data pushes within the script.
        for _, txOut := range tx.TxOut {
            b.AddEntry(txOut.ScriptPubKey)
        }
    }

    return b.Build()
}
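
The intended light-client flow for a basic filter, as a minimal sketch: derive the key from the block hash, build the filter, and probe it for a wallet script. The `block` and `walletScript` inputs are hypothetical, not part of this package.

// Sketch: decide whether a block may be relevant to a wallet by probing
// its basic filter with the wallet's scriptPubKey. A true result is only
// probabilistic (up to the filter's collision probability).
func blockMayBeRelevant(block *wire.MsgBlock, walletScript []byte) (bool, error) {
    filter, err := BuildBasicFilter(block)
    if err != nil {
        return false, err
    }

    // The filter key is derived from the block hash, matching what
    // BuildBasicFilter used internally.
    key := DeriveKey(block.BlockHash())
    return filter.Match(key, walletScript)
}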

// BuildExtFilter builds an extended GCS filter from a block. An extended
// filter supplements a regular basic filter by including all the scriptSig
// data found within a block, that is, all the data pushes within any
// signature scripts.
func BuildExtFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
    blockHash := block.BlockHash()
    b := WithKeyHash(blockHash)

    // If the filter had an issue with the specified key, then we force it
    // to bubble up here by calling the Key() function.
    _, err := b.Key()
    if err != nil {
        return nil, err
    }

    // In order to build an extended filter, we add each piece of data
    // included in each input's sigScript.
    for i, tx := range block.Transactions {
        // Skip the inputs for the coinbase transaction.
        if i != 0 {
            // Next, for each input, we'll add the sigScript (if
            // it's present).
            for _, txIn := range tx.TxIn {
                if txIn.SignatureScript != nil {
                    b.AddScript(txIn.SignatureScript)
                }
            }
        }
    }

    return b.Build()
}

// GetFilterHash returns the double-SHA256 of the filter.
func GetFilterHash(filter *gcs.Filter) (*daghash.Hash, error) {
    filterData, err := filter.NBytes()
    if err != nil {
        return &daghash.Hash{}, err
    }

    return daghash.DoubleHashP(filterData), nil
}

// MakeHeaderForFilter makes a filter chain header for a filter, given the
// filter and the previous filter chain header.
func MakeHeaderForFilter(filter *gcs.Filter, parentHeader *daghash.Hash) (*daghash.Hash, error) {
    filterTip := make([]byte, 2*daghash.HashSize)
    filterHash, err := GetFilterHash(filter)
    if err != nil {
        return &daghash.Hash{}, err
    }

    // In the buffer we created above we'll compute hash || parentHash as an
    // intermediate value.
    copy(filterTip, filterHash[:])
    copy(filterTip[daghash.HashSize:], parentHeader[:])

    // The final filter hash is the double-SHA256 of the hash computed
    // above.
    return daghash.DoubleHashP(filterTip), nil
}
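
A minimal sketch of how MakeHeaderForFilter chains headers across consecutive blocks; the `filters` slice (one filter per block, oldest first) and the zero-hash starting parent are illustrative assumptions, not part of this package.

// Sketch: fold a sequence of per-block filters into the tip filter header.
// Each block's header commits to its filter hash and the previous header.
func filterHeaderChainTip(filters []*gcs.Filter) (*daghash.Hash, error) {
    parent := &daghash.ZeroHash // assumed parent header for the first block
    for _, f := range filters {
        header, err := MakeHeaderForFilter(f, parent)
        if err != nil {
            return nil, err
        }
        parent = header
    }
    return parent, nil
}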

@ -1,266 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Copyright (c) 2017 The Lightning Network Developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package builder_test

import (
    "encoding/hex"
    "testing"

    "github.com/kaspanet/kaspad/txscript"
    "github.com/kaspanet/kaspad/util"
    "github.com/kaspanet/kaspad/util/daghash"
    "github.com/kaspanet/kaspad/util/gcs"
    "github.com/kaspanet/kaspad/util/gcs/builder"
    "github.com/kaspanet/kaspad/wire"
)

var (
    // List of values for building a filter
    contents = [][]byte{
        []byte("Alex"),
        []byte("Bob"),
        []byte("Charlie"),
        []byte("Dick"),
        []byte("Ed"),
        []byte("Frank"),
        []byte("George"),
        []byte("Harry"),
        []byte("Ilya"),
        []byte("John"),
        []byte("Kevin"),
        []byte("Larry"),
        []byte("Michael"),
        []byte("Nate"),
        []byte("Owen"),
        []byte("Paul"),
        []byte("Quentin"),
    }

    testKey = [16]byte{0x4c, 0xb1, 0xab, 0x12, 0x57, 0x62, 0x1e, 0x41,
        0x3b, 0x8b, 0x0e, 0x26, 0x64, 0x8d, 0x4a, 0x15}

    testHash = "000000000000000000496d7ff9bd2c96154a8d64260e8b3b411e625712abb14c"

    testAddr = "dagcoin:pr54mwe99q7vxh22dth6wmqrstnrec86xctaeyyphz"
)

// TestUseBlockHash tests using a block hash as a filter key.
func TestUseBlockHash(t *testing.T) {
    // Block hash #448710, pretty high difficulty.
    txID, err := daghash.NewTxIDFromStr(testHash)
    if err != nil {
        t.Fatalf("Hash from string failed: %s", err.Error())
    }

    // wire.Outpoint
    outpoint := wire.Outpoint{
        TxID:  *txID,
        Index: 4321,
    }

    // util.Address
    addr, err := util.DecodeAddress(testAddr, util.Bech32PrefixDAGCoin)
    if err != nil {
        t.Fatalf("Address decode failed: %s", err.Error())
    }
    addrBytes, err := txscript.PayToAddrScript(addr)
    if err != nil {
        t.Fatalf("Address script build failed: %s", err.Error())
    }

    // Create a GCSBuilder with a key hash and check that the key is derived
    // correctly, then test it.
    b := builder.WithKeyTxID(txID)
    key, err := b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with key hash failed: %s",
            err.Error())
    }
    if key != testKey {
        t.Fatalf("Key not derived correctly from key hash:\n%s\n%s",
            hex.EncodeToString(key[:]),
            hex.EncodeToString(testKey[:]))
    }
    BuilderTest(b, txID, builder.DefaultP, outpoint, addrBytes, t)

    // Create a GCSBuilder with a key hash and non-default P and test it.
    b = builder.WithKeyTxIDP(txID, 30)
    BuilderTest(b, txID, 30, outpoint, addrBytes, t)

    // Create a GCSBuilder with a random key, set the key from a hash
    // manually, check that the key is correct, and test it.
    b = builder.WithRandomKey()
    b.SetKeyFromTxID(txID)
    key, err = b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with known key failed: %s",
            err.Error())
    }
    if key != testKey {
        t.Fatalf("Key not copied correctly from known key:\n%s\n%s",
            hex.EncodeToString(key[:]),
            hex.EncodeToString(testKey[:]))
    }
    BuilderTest(b, txID, builder.DefaultP, outpoint, addrBytes, t)

    // Create a GCSBuilder with a random key and test it.
    b = builder.WithRandomKey()
    key1, err := b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with random key failed: %s",
            err.Error())
    }
    t.Logf("Random Key 1: %s", hex.EncodeToString(key1[:]))
    BuilderTest(b, txID, builder.DefaultP, outpoint, addrBytes, t)

    // Create a GCSBuilder with a random key and non-default P and test it.
    b = builder.WithRandomKeyP(30)
    key2, err := b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with random key failed: %s",
            err.Error())
    }
    t.Logf("Random Key 2: %s", hex.EncodeToString(key2[:]))
    if key2 == key1 {
        t.Fatalf("Random keys are the same!")
    }
    BuilderTest(b, txID, 30, outpoint, addrBytes, t)

    // Create a GCSBuilder with a known key and test it.
    b = builder.WithKey(testKey)
    key, err = b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with known key failed: %s",
            err.Error())
    }
    if key != testKey {
        t.Fatalf("Key not copied correctly from known key:\n%s\n%s",
            hex.EncodeToString(key[:]),
            hex.EncodeToString(testKey[:]))
    }
    BuilderTest(b, txID, builder.DefaultP, outpoint, addrBytes, t)

    // Create a GCSBuilder with a known key and non-default P and test it.
    b = builder.WithKeyP(testKey, 30)
    key, err = b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with known key failed: %s",
            err.Error())
    }
    if key != testKey {
        t.Fatalf("Key not copied correctly from known key:\n%s\n%s",
            hex.EncodeToString(key[:]),
            hex.EncodeToString(testKey[:]))
    }
    BuilderTest(b, txID, 30, outpoint, addrBytes, t)

    // Create a GCSBuilder with a known key and too-high P and ensure the
    // error propagates through all functions that use it.
    b = builder.WithRandomKeyP(33).SetKeyFromTxID(txID).SetKey(testKey)
    b.SetP(30).AddEntry(txID.CloneBytes()).AddEntries(contents)
    b.AddOutpoint(outpoint).AddTxID(txID).AddScript(addrBytes)
    _, err = b.Key()
    if err != gcs.ErrPTooBig {
        t.Fatalf("No error on P too big!")
    }
    _, err = b.Build()
    if err != gcs.ErrPTooBig {
        t.Fatalf("No error on P too big!")
    }
}

func BuilderTest(b *builder.GCSBuilder, txID *daghash.TxID, p uint8,
    outpoint wire.Outpoint, addrBytes []byte, t *testing.T) {

    key, err := b.Key()
    if err != nil {
        t.Fatalf("Builder instantiation with key hash failed: %s",
            err.Error())
    }

    // Build a filter and test matches.
    b.AddEntries(contents)
    f, err := b.Build()
    if err != nil {
        t.Fatalf("Filter build failed: %s", err.Error())
    }
    if f.P() != p {
        t.Fatalf("Filter built with wrong probability")
    }
    match, err := f.Match(key, []byte("Nate"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err)
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }
    match, err = f.Match(key, []byte("weks"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err)
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!",
            builder.DefaultP)
    }

    // Add a txID, build a filter, and test matches.
    b.AddTxID(txID)
    f, err = b.Build()
    if err != nil {
        t.Fatalf("Filter build failed: %s", err.Error())
    }
    match, err = f.Match(key, txID.CloneBytes())
    if err != nil {
        t.Fatalf("Filter match failed: %s", err)
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }

    // Add a wire.Outpoint, build a filter, and test matches.
    b.AddOutpoint(outpoint)
    f, err = b.Build()
    if err != nil {
        t.Fatalf("Filter build failed: %s", err.Error())
    }
    match, err = f.Match(key, txID.CloneBytes())
    if err != nil {
        t.Fatalf("Filter match failed: %s", err)
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }

    // Add a script, build a filter, and test matches.
    b.AddScript(addrBytes)
    f, err = b.Build()
    if err != nil {
        t.Fatalf("Filter build failed: %s", err.Error())
    }
    pushedData, err := txscript.PushedData(addrBytes)
    if err != nil {
        t.Fatalf("Couldn't extract pushed data from addrBytes script: %s",
            err.Error())
    }
    match, err = f.MatchAny(key, pushedData)
    if err != nil {
        t.Fatalf("Filter match any failed: %s", err)
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }

    // Check that adding duplicate items does not increase filter size.
    originalSize := f.N()
    b.AddScript(addrBytes)
    f, err = b.Build()
    if err != nil {
        t.Fatalf("Filter build failed: %s", err.Error())
    }
    if f.N() != originalSize {
        t.Fatal("Filter size increased with duplicate items")
    }
}

@ -1,24 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2016-2017 The Lightning Network Developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package gcs provides an API for building and using a Golomb-coded set filter.

Golomb-Coded Set

A Golomb-coded set is a probabilistic data structure used similarly to a Bloom
filter. A filter uses constant-size overhead plus on average n+2 bits per
item added to the filter, where 2^-n is the desired false positive (collision)
probability.

GCS use in Bitcoin

GCS filters are a proposed mechanism for storing and transmitting per-block
filters in Bitcoin. The usage is intended to be the inverse of Bloom filters:
a full node would send an SPV node the GCS filter for a block, which the SPV
node would check against its list of relevant items. The suggested collision
probability for Bitcoin use is 2^-20.
*/
package gcs
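
A minimal usage sketch of the package described above; the key and entries here are placeholders, and in practice the key would be derived from a block hash so that filters are deterministic.

package main

import (
    "fmt"

    "github.com/kaspanet/kaspad/util/gcs"
)

func main() {
    // Illustrative key and entries; a real caller derives the key from
    // a block hash.
    var key [gcs.KeySize]byte
    entries := [][]byte{[]byte("alpha"), []byte("beta")}

    // Build a filter with a 2^-20 collision probability.
    filter, err := gcs.BuildGCSFilter(20, key, entries)
    if err != nil {
        panic(err)
    }

    // Query it with the same key used to build it.
    match, err := filter.Match(key, []byte("alpha"))
    if err != nil {
        panic(err)
    }
    fmt.Println(match) // true, up to the 2^-20 false-positive rate
}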
454	util/gcs/gcs.go
@ -1,454 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2016-2017 The Lightning Network Developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package gcs

import (
    "bytes"
    "io"
    "sort"

    "github.com/pkg/errors"

    "github.com/aead/siphash"
    "github.com/kaspanet/kaspad/wire"
    "github.com/kkdai/bstream"
)

// Inspired by https://github.com/rasky/gcs

var (
    // ErrNTooBig signifies that the filter can't handle N items.
    ErrNTooBig = errors.Errorf("N is too big to fit in uint32")

    // ErrPTooBig signifies that the filter can't handle `1/2**P`
    // collision probability.
    ErrPTooBig = errors.Errorf("P is too big to fit in uint32")
)

const (
    // KeySize is the size of the byte array required for key material for
    // the SipHash keyed hash function.
    KeySize = 16

    // varIntProtoVer is the protocol version to use for serializing N as a
    // VarInt.
    varIntProtoVer uint32 = 0
)

// fastReduction calculates a mapping that's more or less equivalent to: x mod
// N. However, instead of using a mod operation, which is slow on many
// processors for a non-power-of-two modulus due to the division involved, we
// instead use a "multiply-and-shift" trick which eliminates all divisions,
// described in:
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
//
//   * v * N >> log_2(N)
//
// In our case, using 64-bit integers, log_2 is 64. As most processors don't
// support 128-bit arithmetic natively, we'll be super portable and unfold the
// operation into several operations with 64-bit arithmetic. As inputs, we take
// the number to reduce, and our modulus N divided into its high 32 bits and
// low 32 bits.
func fastReduction(v, nHi, nLo uint64) uint64 {
    // First, we'll split the item we need to reduce into its higher and
    // lower bits.
    vhi := v >> 32
    vlo := uint64(uint32(v))

    // Then, we distribute multiplication over each part.
    vnphi := vhi * nHi
    vnpmid := vhi * nLo
    npvmid := nHi * vlo
    vnplo := vlo * nLo

    // We calculate the carry bit.
    carry := (uint64(uint32(vnpmid)) + uint64(uint32(npvmid)) +
        (vnplo >> 32)) >> 32

    // Last, we add the high bits, the middle bits, and the carry.
    v = vnphi + (vnpmid >> 32) + (npvmid >> 32) + carry

    return v
}
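
Since fastReduction computes the high 64 bits of the 128-bit product v * N, it can be sanity-checked against the standard library's math/bits.Mul64. A small sketch with arbitrary inputs:

// Sketch: verify the multiply-and-shift trick against Go's native
// 128-bit multiply. bits.Mul64 returns the high and low 64-bit words
// of the full product.
func checkFastReduction(v, modulusNP uint64) bool {
    hi, _ := bits.Mul64(v, modulusNP) // floor(v * N / 2^64)
    got := fastReduction(v, modulusNP>>32, uint64(uint32(modulusNP)))
    return got == hi
}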

// Filter describes an immutable filter that can be built from a set of data
// elements, serialized, deserialized, and queried in a thread-safe manner. The
// serialized form is compressed as a Golomb Coded Set (GCS), but does not
// include N or P to allow the user to encode the metadata separately if
// necessary. The hash function used is SipHash, a keyed function; the key used
// in building the filter is required in order to match filter values and is
// not included in the serialized form.
type Filter struct {
    n          uint32
    p          uint8
    modulusNP  uint64
    filterData []byte
}

// BuildGCSFilter builds a new GCS filter with the collision probability of
// `1/(2**P)`, key `key`, and including every `[]byte` in `data` as a member of
// the set.
func BuildGCSFilter(P uint8, key [KeySize]byte, data [][]byte) (*Filter, error) {
    // Some initial parameter checks: make sure we have data from which to
    // build the filter, and make sure our parameters will fit the hash
    // function we're using.
    if uint64(len(data)) >= (1 << 32) {
        return nil, ErrNTooBig
    }
    if P > 32 {
        return nil, ErrPTooBig
    }

    // Create the filter object and insert metadata.
    f := Filter{
        n: uint32(len(data)),
        p: P,
    }
    f.modulusNP = uint64(f.n) << P

    // Shortcut if the filter is empty.
    if f.n == 0 {
        return &f, nil
    }

    // Build the filter.
    values := make(uint64Slice, 0, len(data))
    b := bstream.NewBStreamWriter(0)

    // Insert the hash (fast-ranged over a space of N*2^P) of each data
    // element into a slice and sort the slice. This can be greatly
    // optimized with native 128-bit multiplication, but we're going to be
    // fully portable for now.
    //
    // First, we cache the high and low bits of modulusNP for the
    // multiplication of 2 64-bit integers into a 128-bit integer.
    nphi := f.modulusNP >> 32
    nplo := uint64(uint32(f.modulusNP))
    for _, d := range data {
        // For each datum, we assign the initial hash to a uint64.
        v := siphash.Sum64(d, &key)

        v = fastReduction(v, nphi, nplo)
        values = append(values, v)
    }
    sort.Sort(values)

    // Write the sorted list of values into the filter bitstream,
    // compressing it using Golomb coding.
    var value, lastValue, remainder uint64
    for _, v := range values {
        // Calculate the difference between this value and the last,
        // modulo 2^P.
        remainder = (v - lastValue) & ((uint64(1) << P) - 1)

        // Calculate the difference between this value and the last,
        // divided by 2^P.
        value = (v - lastValue - remainder) >> f.p
        lastValue = v

        // Write the quotient into the bitstream in unary; the
        // average should be around 1 (2 bits - 0b10).
        for value > 0 {
            b.WriteBit(true)
            value--
        }
        b.WriteBit(false)

        // Write the remainder as a big-endian integer with enough bits
        // to represent the appropriate collision probability.
        b.WriteBits(remainder, int(f.p))
    }

    // Copy the bitstream into the filter object and return the object.
    f.filterData = b.Bytes()

    return &f, nil
}

// FromBytes deserializes a GCS filter from a known N, P, and serialized filter
// as returned by Bytes().
func FromBytes(N uint32, P uint8, d []byte) (*Filter, error) {
    // Basic sanity check.
    if P > 32 {
        return nil, ErrPTooBig
    }

    // Create the filter object and insert metadata.
    f := &Filter{
        n: N,
        p: P,
    }
    f.modulusNP = uint64(f.n) << P

    // Copy the filter.
    f.filterData = make([]byte, len(d))
    copy(f.filterData, d)

    return f, nil
}

// FromNBytes deserializes a GCS filter from a known P, and serialized N and
// filter as returned by NBytes().
func FromNBytes(P uint8, d []byte) (*Filter, error) {
    buffer := bytes.NewBuffer(d)
    N, err := wire.ReadVarInt(buffer)
    if err != nil {
        return nil, err
    }
    if N >= (1 << 32) {
        return nil, ErrNTooBig
    }
    return FromBytes(uint32(N), P, buffer.Bytes())
}

// FromPBytes deserializes a GCS filter from a known N, and serialized P and
// filter as returned by PBytes().
func FromPBytes(N uint32, d []byte) (*Filter, error) {
    return FromBytes(N, d[0], d[1:])
}

// FromNPBytes deserializes a GCS filter from a serialized N, P, and filter as
// returned by NPBytes().
func FromNPBytes(d []byte) (*Filter, error) {
    buffer := bytes.NewBuffer(d)

    N, err := wire.ReadVarInt(buffer)
    if err != nil {
        return nil, err
    }
    if N >= (1 << 32) {
        return nil, ErrNTooBig
    }

    P, err := buffer.ReadByte()
    if err != nil {
        return nil, err
    }

    return FromBytes(uint32(N), P, buffer.Bytes())
}

// Bytes returns the serialized format of the GCS filter, which does not
// include N or P (returned by separate methods) or the key used by SipHash.
func (f *Filter) Bytes() ([]byte, error) {
    filterData := make([]byte, len(f.filterData))
    copy(filterData, f.filterData)
    return filterData, nil
}

// NBytes returns the serialized format of the GCS filter with N, which does
// not include P (returned by a separate method) or the key used by SipHash.
func (f *Filter) NBytes() ([]byte, error) {
    var buffer bytes.Buffer
    buffer.Grow(wire.VarIntSerializeSize(uint64(f.n)) + len(f.filterData))

    err := wire.WriteVarInt(&buffer, uint64(f.n))
    if err != nil {
        return nil, err
    }

    _, err = buffer.Write(f.filterData)
    if err != nil {
        return nil, err
    }

    return buffer.Bytes(), nil
}

// PBytes returns the serialized format of the GCS filter with P, which does
// not include N (returned by a separate method) or the key used by SipHash.
func (f *Filter) PBytes() ([]byte, error) {
    filterData := make([]byte, len(f.filterData)+1)
    filterData[0] = f.p
    copy(filterData[1:], f.filterData)
    return filterData, nil
}

// NPBytes returns the serialized format of the GCS filter with N and P, which
// does not include the key used by SipHash.
func (f *Filter) NPBytes() ([]byte, error) {
    var buffer bytes.Buffer
    buffer.Grow(wire.VarIntSerializeSize(uint64(f.n)) + 1 + len(f.filterData))

    err := wire.WriteVarInt(&buffer, uint64(f.n))
    if err != nil {
        return nil, err
    }

    err = buffer.WriteByte(f.p)
    if err != nil {
        return nil, err
    }

    _, err = buffer.Write(f.filterData)
    if err != nil {
        return nil, err
    }

    return buffer.Bytes(), nil
}
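
A sketch of how these serialization formats pair with their constructors; `filter` is assumed to be a previously built *Filter, and only the NP round trip is shown for brevity.

// Sketch: round-trip a filter through NPBytes/FromNPBytes. Since N and P
// travel with the data in this format, the copy carries the same metadata.
func roundTripNP(filter *Filter) error {
    withNP, err := filter.NPBytes()
    if err != nil {
        return err
    }

    copied, err := FromNPBytes(withNP)
    if err != nil {
        return err
    }

    if copied.N() != filter.N() || copied.P() != filter.P() {
        return errors.New("metadata mismatch after round trip")
    }
    return nil
}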

// P returns the filter's collision probability as a negative power of 2 (that
// is, a collision probability of `1/2**20` is represented as 20).
func (f *Filter) P() uint8 {
    return f.p
}

// N returns the size of the data set used to build the filter.
func (f *Filter) N() uint32 {
    return f.n
}

// Match checks whether a []byte value is likely (within collision probability)
// to be a member of the set represented by the filter.
func (f *Filter) Match(key [KeySize]byte, data []byte) (bool, error) {
    // Create a filter bitstream.
    filterData, err := f.Bytes()
    if err != nil {
        return false, err
    }

    b := bstream.NewBStreamReader(filterData)

    // We take the high and low bits of modulusNP for the multiplication
    // of 2 64-bit integers into a 128-bit integer.
    nphi := f.modulusNP >> 32
    nplo := uint64(uint32(f.modulusNP))

    // Then we hash our search term with the same parameters as the filter.
    term := siphash.Sum64(data, &key)
    term = fastReduction(term, nphi, nplo)

    // Go through the search filter and look for the desired value.
    var lastValue uint64
    for lastValue < term {
        // Read the difference between previous and new value from
        // bitstream.
        value, err := f.readFullUint64(b)
        if err != nil {
            if err == io.EOF {
                return false, nil
            }
            return false, err
        }

        // Add the previous value to it.
        value += lastValue
        if value == term {
            return true, nil
        }

        lastValue = value
    }

    return false, nil
}

// MatchAny checks whether any []byte value is likely (within collision
// probability) to be a member of the set represented by the filter, faster
// than calling Match() for each value individually.
func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
    // Basic sanity check.
    if len(data) == 0 {
        return false, nil
    }

    // Create a filter bitstream.
    filterData, err := f.Bytes()
    if err != nil {
        return false, err
    }

    b := bstream.NewBStreamReader(filterData)

    // Create an uncompressed filter of the search values.
    values := make(uint64Slice, 0, len(data))

    // First, we cache the high and low bits of modulusNP for the
    // multiplication of 2 64-bit integers into a 128-bit integer.
    nphi := f.modulusNP >> 32
    nplo := uint64(uint32(f.modulusNP))
    for _, d := range data {
        // For each datum, we assign the initial hash to a uint64.
        v := siphash.Sum64(d, &key)

        // We'll then reduce the value down to the range of our
        // modulus.
        v = fastReduction(v, nphi, nplo)
        values = append(values, v)
    }
    sort.Sort(values)

    // Zip down the filters, comparing values until we either run out of
    // values to compare in one of the filters or we reach a matching
    // value.
    var lastValue1, lastValue2 uint64
    lastValue2 = values[0]
    i := 1
    for lastValue1 != lastValue2 {
        // Check which filter to advance to make sure we're comparing
        // the right values.
        switch {
        case lastValue1 > lastValue2:
            // Advance filter created from search terms or return
            // false if we're at the end because nothing matched.
            if i < len(values) {
                lastValue2 = values[i]
                i++
            } else {
                return false, nil
            }
        case lastValue2 > lastValue1:
            // Advance filter we're searching or return false if
            // we're at the end because nothing matched.
            value, err := f.readFullUint64(b)
            if err != nil {
                if err == io.EOF {
                    return false, nil
                }
                return false, err
            }
            lastValue1 += value
        }
    }

    // If we've made it this far, an element matched between filters so we
    // return true.
    return true, nil
}

// readFullUint64 reads a value represented by the sum of a unary multiple of
// the filter's P modulus (`2**P`) and a big-endian P-bit remainder.
func (f *Filter) readFullUint64(b *bstream.BStream) (uint64, error) {
    var quotient uint64

    // Count the 1s until we reach a 0.
    c, err := b.ReadBit()
    if err != nil {
        return 0, err
    }
    for c {
        quotient++
        c, err = b.ReadBit()
        if err != nil {
            return 0, err
        }
    }

    // Read P bits.
    remainder, err := b.ReadBits(int(f.p))
    if err != nil {
        return 0, err
    }

    // Add the multiple and the remainder.
    v := (quotient << f.p) + remainder
    return v, nil
}
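
A worked example of the Golomb quotient/remainder split that readFullUint64 reverses, using P = 20 and an arbitrary delta; a standalone sketch (fmt is only for the printout):

// Sketch: splitting a delta into the unary quotient and P-bit remainder,
// then recombining them exactly as readFullUint64 does.
func golombSplitExample() {
    const p = 20
    delta := uint64(3210987)

    quotient := delta >> p              // encoded as quotient 1-bits, then a 0
    remainder := delta & ((1 << p) - 1) // encoded as a big-endian p-bit integer

    recovered := (quotient << p) + remainder
    fmt.Println(recovered == delta) // true: 3<<20 + 65259 == 3210987
}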

@ -1,283 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2016-2017 The Lightning Network Developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package gcs_test

import (
    "bytes"
    "encoding/binary"
    "math/rand"
    "testing"

    "github.com/kaspanet/kaspad/util/gcs"
)

var (
    // No need to allocate an err variable in every test
    err error

    // Collision probability for the tests (1/2**20)
    P = uint8(20)

    // Filters are preserved between tests, so we declare here which
    // filters the tests exercise.
    filter, filter2, filter3, filter4, filter5 *gcs.Filter

    // We need to use the same key for building and querying the filters
    key [gcs.KeySize]byte

    // List of values for building a filter
    contents = [][]byte{
        []byte("Alex"),
        []byte("Bob"),
        []byte("Charlie"),
        []byte("Dick"),
        []byte("Ed"),
        []byte("Frank"),
        []byte("George"),
        []byte("Harry"),
        []byte("Ilya"),
        []byte("John"),
        []byte("Kevin"),
        []byte("Larry"),
        []byte("Michael"),
        []byte("Nate"),
        []byte("Owen"),
        []byte("Paul"),
        []byte("Quentin"),
    }

    // List of values for querying a filter using MatchAny()
    contents2 = [][]byte{
        []byte("Alice"),
        []byte("Betty"),
        []byte("Charmaine"),
        []byte("Donna"),
        []byte("Edith"),
        []byte("Faina"),
        []byte("Georgia"),
        []byte("Hannah"),
        []byte("Ilsbeth"),
        []byte("Jennifer"),
        []byte("Kayla"),
        []byte("Lena"),
        []byte("Michelle"),
        []byte("Natalie"),
        []byte("Ophelia"),
        []byte("Peggy"),
        []byte("Queenie"),
    }
)

// TestGCSFilterBuild builds a test filter with a randomized key. For Bitcoin
// use, deterministic filter generation is desired. Therefore, a key that's
// derived deterministically would be required.
func TestGCSFilterBuild(t *testing.T) {
    for i := 0; i < gcs.KeySize; i += 4 {
        binary.BigEndian.PutUint32(key[i:], rand.Uint32())
    }
    filter, err = gcs.BuildGCSFilter(P, key, contents)
    if err != nil {
        t.Fatalf("Filter build failed: %s", err.Error())
    }
}

// TestGCSFilterCopy deserializes and serializes a filter to create a copy.
func TestGCSFilterCopy(t *testing.T) {
    serialized2, err := filter.Bytes()
    if err != nil {
        t.Fatalf("Filter Bytes() failed: %v", err)
    }
    filter2, err = gcs.FromBytes(filter.N(), P, serialized2)
    if err != nil {
        t.Fatalf("Filter copy failed: %s", err.Error())
    }
    serialized3, err := filter.NBytes()
    if err != nil {
        t.Fatalf("Filter NBytes() failed: %v", err)
    }
    filter3, err = gcs.FromNBytes(filter.P(), serialized3)
    if err != nil {
        t.Fatalf("Filter copy failed: %s", err.Error())
    }
    serialized4, err := filter.PBytes()
    if err != nil {
        t.Fatalf("Filter PBytes() failed: %v", err)
    }
    filter4, err = gcs.FromPBytes(filter.N(), serialized4)
    if err != nil {
        t.Fatalf("Filter copy failed: %s", err.Error())
    }
    serialized5, err := filter.NPBytes()
    if err != nil {
        t.Fatalf("Filter NPBytes() failed: %v", err)
    }
    filter5, err = gcs.FromNPBytes(serialized5)
    if err != nil {
        t.Fatalf("Filter copy failed: %s", err.Error())
    }
}

// TestGCSFilterMetadata checks that the filter metadata is built and copied
// correctly.
func TestGCSFilterMetadata(t *testing.T) {
    if filter.P() != P {
        t.Fatal("P not correctly stored in filter metadata")
    }
    if filter.N() != uint32(len(contents)) {
        t.Fatal("N not correctly stored in filter metadata")
    }
    if filter.P() != filter2.P() {
        t.Fatal("P doesn't match between copied filters")
    }
    if filter.P() != filter3.P() {
        t.Fatal("P doesn't match between copied filters")
    }
    if filter.P() != filter4.P() {
        t.Fatal("P doesn't match between copied filters")
    }
    if filter.P() != filter5.P() {
        t.Fatal("P doesn't match between copied filters")
    }
    if filter.N() != filter2.N() {
        t.Fatal("N doesn't match between copied filters")
    }
    if filter.N() != filter3.N() {
        t.Fatal("N doesn't match between copied filters")
    }
    if filter.N() != filter4.N() {
        t.Fatal("N doesn't match between copied filters")
    }
    if filter.N() != filter5.N() {
        t.Fatal("N doesn't match between copied filters")
    }
    serialized, err := filter.Bytes()
    if err != nil {
        t.Fatalf("Filter Bytes() failed: %v", err)
    }
    serialized2, err := filter2.Bytes()
    if err != nil {
        t.Fatalf("Filter Bytes() failed: %v", err)
    }
    if !bytes.Equal(serialized, serialized2) {
        t.Fatal("Bytes don't match between copied filters")
    }
    serialized3, err := filter3.Bytes()
    if err != nil {
        t.Fatalf("Filter Bytes() failed: %v", err)
    }
    if !bytes.Equal(serialized, serialized3) {
        t.Fatal("Bytes don't match between copied filters")
    }
    serialized4, err := filter4.Bytes()
    if err != nil {
        t.Fatalf("Filter Bytes() failed: %v", err)
    }
    if !bytes.Equal(serialized, serialized4) {
        t.Fatal("Bytes don't match between copied filters")
    }
    serialized5, err := filter5.Bytes()
    if err != nil {
        t.Fatalf("Filter Bytes() failed: %v", err)
    }
    if !bytes.Equal(serialized, serialized5) {
        t.Fatal("Bytes don't match between copied filters")
    }
}

// TestGCSFilterMatch checks that both the built and copied filters match
// correctly, logging any false positives without failing on them.
func TestGCSFilterMatch(t *testing.T) {
    match, err := filter.Match(key, []byte("Nate"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }
    match, err = filter2.Match(key, []byte("Nate"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }
    match, err = filter.Match(key, []byte("Quentin"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }
    match, err = filter2.Match(key, []byte("Quentin"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if !match {
        t.Fatal("Filter didn't match when it should have!")
    }
    match, err = filter.Match(key, []byte("Nates"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!", P)
    }
    match, err = filter2.Match(key, []byte("Nates"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!", P)
    }
    match, err = filter.Match(key, []byte("Quentins"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!", P)
    }
    match, err = filter2.Match(key, []byte("Quentins"))
    if err != nil {
        t.Fatalf("Filter match failed: %s", err.Error())
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!", P)
    }
}

// TestGCSFilterMatchAny checks that both the built and copied filters match a
// list correctly, logging any false positives without failing on them.
func TestGCSFilterMatchAny(t *testing.T) {
    match, err := filter.MatchAny(key, contents2)
    if err != nil {
        t.Fatalf("Filter match any failed: %s", err.Error())
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!", P)
    }
    match, err = filter2.MatchAny(key, contents2)
    if err != nil {
        t.Fatalf("Filter match any failed: %s", err.Error())
    }
    if match {
        t.Logf("False positive match, should be 1 in 2**%d!", P)
    }
    contents2 = append(contents2, []byte("Nate"))
    match, err = filter.MatchAny(key, contents2)
    if err != nil {
        t.Fatalf("Filter match any failed: %s", err.Error())
    }
    if !match {
        t.Fatal("Filter didn't match any when it should have!")
    }
    match, err = filter2.MatchAny(key, contents2)
    if err != nil {
        t.Fatalf("Filter match any failed: %s", err.Error())
    }
    if !match {
        t.Fatal("Filter didn't match any when it should have!")
    }
}

@ -1,131 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2016-2017 The Lightning Network Developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package gcs_test

import (
    "encoding/binary"
    "math/rand"
    "testing"

    "github.com/kaspanet/kaspad/util/gcs"
)

func genRandFilterElements(numElements uint) ([][]byte, error) {
    testContents := make([][]byte, numElements)
    for i := range testContents {
        randElem := make([]byte, 32)
        if _, err := rand.Read(randElem); err != nil {
            return nil, err
        }
        testContents[i] = randElem
    }

    return testContents, nil
}

var (
    generatedFilter *gcs.Filter
    filterErr       error
)

// BenchmarkGCSFilterBuild50000 benchmarks building a filter from 50,000
// random elements.
func BenchmarkGCSFilterBuild50000(b *testing.B) {
    b.StopTimer()
    var testKey [gcs.KeySize]byte
    for i := 0; i < gcs.KeySize; i += 4 {
        binary.BigEndian.PutUint32(testKey[i:], rand.Uint32())
    }
    randFilterElems, genErr := genRandFilterElements(50000)
    if genErr != nil {
        b.Fatalf("unable to generate random item: %v", genErr)
    }
    b.StartTimer()

    var localFilter *gcs.Filter
    for i := 0; i < b.N; i++ {
        localFilter, err = gcs.BuildGCSFilter(P, testKey,
            randFilterElems)
        if err != nil {
            b.Fatalf("unable to generate filter: %v", err)
        }
    }
    generatedFilter = localFilter
}

// BenchmarkGCSFilterBuild100000 benchmarks building a filter from 100,000
// random elements.
func BenchmarkGCSFilterBuild100000(b *testing.B) {
    b.StopTimer()
    var testKey [gcs.KeySize]byte
    for i := 0; i < gcs.KeySize; i += 4 {
        binary.BigEndian.PutUint32(testKey[i:], rand.Uint32())
    }
    randFilterElems, genErr := genRandFilterElements(100000)
    if genErr != nil {
        b.Fatalf("unable to generate random item: %v", genErr)
    }
    b.StartTimer()

    var localFilter *gcs.Filter
    for i := 0; i < b.N; i++ {
        localFilter, err = gcs.BuildGCSFilter(P, testKey,
            randFilterElems)
        if err != nil {
            b.Fatalf("unable to generate filter: %v", err)
        }
    }
    generatedFilter = localFilter
}

var (
    match bool
)

// BenchmarkGCSFilterMatch benchmarks querying a filter for a single value.
func BenchmarkGCSFilterMatch(b *testing.B) {
    b.StopTimer()
    filter, err := gcs.BuildGCSFilter(P, key, contents)
    if err != nil {
        b.Fatalf("Failed to build filter")
    }
    b.StartTimer()

    var (
        localMatch bool
    )
    for i := 0; i < b.N; i++ {
        localMatch, err = filter.Match(key, []byte("Nate"))
        if err != nil {
            b.Fatalf("unable to match filter: %v", err)
        }

        localMatch, err = filter.Match(key, []byte("Nates"))
        if err != nil {
            b.Fatalf("unable to match filter: %v", err)
        }
    }
    match = localMatch
}

// BenchmarkGCSFilterMatchAny benchmarks querying a filter for a list of
// values.
func BenchmarkGCSFilterMatchAny(b *testing.B) {
    b.StopTimer()
    filter, err := gcs.BuildGCSFilter(P, key, contents)
    if err != nil {
        b.Fatalf("Failed to build filter")
    }
    b.StartTimer()

    var (
        localMatch bool
    )
    for i := 0; i < b.N; i++ {
        localMatch, err = filter.MatchAny(key, contents2)
        if err != nil {
            b.Fatalf("unable to match filter: %v", err)
        }
    }
    match = localMatch
}

@ -1,26 +0,0 @@
// Copyright (c) 2016-2017 The btcsuite developers
// Copyright (c) 2016-2017 The Lightning Network Developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package gcs

// uint64Slice is a package-local utility type that allows us to use Go's sort
// package to sort a []uint64 by implementing sort.Interface.
type uint64Slice []uint64

// Len returns the length of the slice.
func (p uint64Slice) Len() int {
    return len(p)
}

// Less returns true when the ith element is smaller than the jth element of
// the slice, and returns false otherwise.
func (p uint64Slice) Less(i, j int) bool {
    return p[i] < p[j]
}

// Swap swaps two slice elements.
func (p uint64Slice) Swap(i, j int) {
    p[i], p[j] = p[j], p[i]
}

@ -50,12 +50,6 @@ const (
    CmdReject = "reject"
    CmdSendHeaders = "sendheaders"
    CmdFeeFilter = "feefilter"
    CmdGetCFilters = "getcfilters"
    CmdGetCFHeaders = "getcfheaders"
    CmdGetCFCheckpt = "getcfcheckpt"
    CmdCFilter = "cfilter"
    CmdCFHeaders = "cfheaders"
    CmdCFCheckpt = "cfcheckpt"
    CmdGetBlockLocator = "getlocator"
    CmdBlockLocator = "locator"
)
@ -145,24 +139,6 @@ func makeEmptyMessage(command string) (Message, error) {
    case CmdFeeFilter:
        msg = &MsgFeeFilter{}

    case CmdGetCFilters:
        msg = &MsgGetCFilters{}

    case CmdGetCFHeaders:
        msg = &MsgGetCFHeaders{}

    case CmdGetCFCheckpt:
        msg = &MsgGetCFCheckpt{}

    case CmdCFilter:
        msg = &MsgCFilter{}

    case CmdCFHeaders:
        msg = &MsgCFHeaders{}

    case CmdCFCheckpt:
        msg = &MsgCFCheckpt{}

    default:
        return nil, errors.Errorf("unhandled command [%s]", command)
    }

@ -71,13 +71,6 @@ func TestMessage(t *testing.T) {
    bh := NewBlockHeader(1, []*daghash.Hash{mainNetGenesisHash, simNetGenesisHash}, &daghash.Hash{}, &daghash.Hash{}, &daghash.Hash{}, 0, 0)
    msgMerkleBlock := NewMsgMerkleBlock(bh)
    msgReject := NewMsgReject("block", RejectDuplicate, "duplicate block")
    msgGetCFilters := NewMsgGetCFilters(GCSFilterExtended, 0, &daghash.Hash{})
    msgGetCFHeaders := NewMsgGetCFHeaders(GCSFilterExtended, 0, &daghash.Hash{})
    msgGetCFCheckpt := NewMsgGetCFCheckpt(GCSFilterExtended, &daghash.Hash{})
    msgCFilter := NewMsgCFilter(GCSFilterExtended, &daghash.Hash{},
        []byte("payload"))
    msgCFHeaders := NewMsgCFHeaders()
    msgCFCheckpt := NewMsgCFCheckpt(GCSFilterExtended, &daghash.Hash{}, 0)

    tests := []struct {
        in Message // Value to encode
@ -109,12 +102,6 @@ func TestMessage(t *testing.T) {
        {msgFilterLoad, msgFilterLoad, pver, MainNet, 35},
        {msgMerkleBlock, msgMerkleBlock, pver, MainNet, 215},
        {msgReject, msgReject, pver, MainNet, 79},
        {msgGetCFilters, msgGetCFilters, pver, MainNet, 65},
        {msgGetCFHeaders, msgGetCFHeaders, pver, MainNet, 65},
        {msgGetCFCheckpt, msgGetCFCheckpt, pver, MainNet, 57},
        {msgCFilter, msgCFilter, pver, MainNet, 65},
        {msgCFHeaders, msgCFHeaders, pver, MainNet, 90},
        {msgCFCheckpt, msgCFCheckpt, pver, MainNet, 58},
    }

    t.Logf("Running %d tests", len(tests))

@ -1,150 +0,0 @@
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
    "fmt"
    "io"

    "github.com/kaspanet/kaspad/util/daghash"
)

const (
    // CFCheckptInterval is the gap (in number of blocks) between each
    // filter header checkpoint.
    CFCheckptInterval = 1000
)

// MsgCFCheckpt implements the Message interface and represents a bitcoin
// cfcheckpt message. It is used to deliver committed filter header information
// in response to a getcfcheckpt message (MsgGetCFCheckpt). See MsgGetCFCheckpt
// for details on requesting the headers.
type MsgCFCheckpt struct {
    FilterType    FilterType
    StopHash      *daghash.Hash
    FilterHeaders []*daghash.Hash
}

// AddCFHeader adds a new committed filter header to the message.
func (msg *MsgCFCheckpt) AddCFHeader(header *daghash.Hash) error {
    if len(msg.FilterHeaders) == cap(msg.FilterHeaders) {
        str := fmt.Sprintf("FilterHeaders has insufficient capacity for "+
            "additional header: len = %d", len(msg.FilterHeaders))
        return messageError("MsgCFCheckpt.AddCFHeader", str)
    }

    msg.FilterHeaders = append(msg.FilterHeaders, header)
    return nil
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32) error {
    // Read filter type
    err := ReadElement(r, &msg.FilterType)
    if err != nil {
        return err
    }

    // Read stop hash
    msg.StopHash = &daghash.Hash{}
    err = ReadElement(r, msg.StopHash)
    if err != nil {
        return err
    }

    // Read number of filter headers
    count, err := ReadVarInt(r)
    if err != nil {
        return err
    }

    // Create a contiguous slice of hashes to deserialize into in order to
    // reduce the number of allocations.
    msg.FilterHeaders = make([]*daghash.Hash, count)
    for i := uint64(0); i < count; i++ {
        var cfh daghash.Hash
        err := ReadElement(r, &cfh)
        if err != nil {
            return err
        }
        msg.FilterHeaders[i] = &cfh
    }

    return nil
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgCFCheckpt) BtcEncode(w io.Writer, pver uint32) error {
    // Write filter type
    err := WriteElement(w, msg.FilterType)
    if err != nil {
        return err
    }

    // Write stop hash
    err = WriteElement(w, msg.StopHash)
    if err != nil {
        return err
    }

    // Write length of FilterHeaders slice
    count := len(msg.FilterHeaders)
    err = WriteVarInt(w, uint64(count))
    if err != nil {
        return err
    }

    for _, cfh := range msg.FilterHeaders {
        err := WriteElement(w, cfh)
        if err != nil {
            return err
        }
    }

    return nil
}

// Deserialize decodes a filter header from r into the receiver using a format
// that is suitable for long-term storage such as a database. This function
// differs from BtcDecode in that BtcDecode decodes from the bitcoin wire
// protocol as it was sent across the network. The wire encoding can
// technically differ depending on the protocol version and doesn't even really
// need to match the format of a stored filter header at all. As of the time
// this comment was written, the encoded filter header is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgCFCheckpt) Deserialize(r io.Reader) error {
    // At the current time, there is no difference between the wire encoding
    // and the stable long-term storage format. As a result, make use of
    // BtcDecode.
    return msg.BtcDecode(r, 0)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgCFCheckpt) Command() string {
    return CmdCFCheckpt
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgCFCheckpt) MaxPayloadLength(pver uint32) uint32 {
    // Message size depends on the blockchain height, so return general limit
    // for all messages.
    return MaxMessagePayload
}

// NewMsgCFCheckpt returns a new bitcoin cfcheckpt message that conforms to
// the Message interface. See MsgCFCheckpt for details.
func NewMsgCFCheckpt(filterType FilterType, stopHash *daghash.Hash,
    headersCount int) *MsgCFCheckpt {
    return &MsgCFCheckpt{
        FilterType:    filterType,
        StopHash:      stopHash,
        FilterHeaders: make([]*daghash.Hash, 0, headersCount),
    }
}
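
A minimal encode/decode round trip for this message; GCSFilterRegular, daghash.ZeroHash, and protocol version 0 are placeholder choices for illustration (Deserialize above also passes 0).

// Sketch: round-trip a cfcheckpt message through the wire encoding.
func cfcheckptRoundTrip() (*MsgCFCheckpt, error) {
    msg := NewMsgCFCheckpt(GCSFilterRegular, &daghash.ZeroHash, 1)
    if err := msg.AddCFHeader(&daghash.ZeroHash); err != nil {
        return nil, err
    }

    var buf bytes.Buffer
    if err := msg.BtcEncode(&buf, 0); err != nil {
        return nil, err
    }

    decoded := &MsgCFCheckpt{}
    if err := decoded.BtcDecode(&buf, 0); err != nil {
        return nil, err
    }
    return decoded, nil
}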
|
@ -1,184 +0,0 @@
|
||||
// Copyright (c) 2017 The btcsuite developers
|
||||
// Use of this source code is governed by an ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package wire
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/kaspanet/kaspad/util/daghash"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxCFHeaderPayload is the maximum byte size of a committed
|
||||
// filter header.
|
||||
MaxCFHeaderPayload = daghash.HashSize
|
||||
|
||||
// MaxCFHeadersPerMsg is the maximum number of committed filter headers
|
||||
// that can be in a single bitcoin cfheaders message.
|
||||
MaxCFHeadersPerMsg = 2000
|
||||
)
|
||||
|
||||
// MsgCFHeaders implements the Message interface and represents a bitcoin
|
||||
// cfheaders message. It is used to deliver committed filter header information
|
||||
// in response to a getcfheaders message (MsgGetCFHeaders). The maximum number
|
||||
// of committed filter headers per message is currently 2000. See
|
||||
// MsgGetCFHeaders for details on requesting the headers.
|
||||
type MsgCFHeaders struct {
|
||||
FilterType FilterType
|
||||
StopHash *daghash.Hash
|
||||
PrevFilterHeader *daghash.Hash
|
||||
FilterHashes []*daghash.Hash
|
||||
}
|
||||
|
||||
// AddCFHash adds a new filter hash to the message.
|
||||
func (msg *MsgCFHeaders) AddCFHash(hash *daghash.Hash) error {
|
||||
if len(msg.FilterHashes)+1 > MaxCFHeadersPerMsg {
|
||||
str := fmt.Sprintf("too many block headers in message [max %d]",
|
||||
MaxBlockHeadersPerMsg)
|
||||
return messageError("MsgCFHeaders.AddCFHash", str)
|
||||
}
|
||||
|
||||
msg.FilterHashes = append(msg.FilterHashes, hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
|
||||
// This is part of the Message interface implementation.
|
||||
func (msg *MsgCFHeaders) BtcDecode(r io.Reader, pver uint32) error {
|
||||
// Read filter type
|
||||
err := ReadElement(r, &msg.FilterType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read stop hash
|
||||
msg.StopHash = &daghash.Hash{}
|
||||
err = ReadElement(r, msg.StopHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read prev filter header
|
||||
msg.PrevFilterHeader = &daghash.Hash{}
|
||||
err = ReadElement(r, msg.PrevFilterHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read number of filter headers
|
||||
count, err := ReadVarInt(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Limit to max committed filter headers per message.
|
||||
if count > MaxCFHeadersPerMsg {
|
||||
str := fmt.Sprintf("too many committed filter headers for "+
|
||||
"message [count %d, max %d]", count,
|
||||
MaxBlockHeadersPerMsg)
|
||||
return messageError("MsgCFHeaders.BtcDecode", str)
|
||||
}
|
||||
|
||||
// Create a contiguous slice of hashes to deserialize into in order to
|
||||
// reduce the number of allocations.
|
||||
msg.FilterHashes = make([]*daghash.Hash, 0, count)
|
||||
for i := uint64(0); i < count; i++ {
|
||||
var cfh daghash.Hash
|
||||
err := ReadElement(r, &cfh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.AddCFHash(&cfh)
|
||||
}
|
||||
|
||||
return nil
|
||||
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgCFHeaders) BtcEncode(w io.Writer, pver uint32) error {
	// Write filter type
	err := WriteElement(w, msg.FilterType)
	if err != nil {
		return err
	}

	// Write stop hash
	err = WriteElement(w, msg.StopHash)
	if err != nil {
		return err
	}

	// Write prev filter header
	err = WriteElement(w, msg.PrevFilterHeader)
	if err != nil {
		return err
	}

	// Limit to max committed headers per message.
	count := len(msg.FilterHashes)
	if count > MaxCFHeadersPerMsg {
		str := fmt.Sprintf("too many committed filter headers for "+
			"message [count %d, max %d]", count,
			MaxCFHeadersPerMsg)
		return messageError("MsgCFHeaders.BtcEncode", str)
	}

	err = WriteVarInt(w, uint64(count))
	if err != nil {
		return err
	}

	for _, cfh := range msg.FilterHashes {
		err := WriteElement(w, cfh)
		if err != nil {
			return err
		}
	}

	return nil
}

// Deserialize decodes a filter header from r into the receiver using a format
// that is suitable for long-term storage such as a database. This function
// differs from BtcDecode in that BtcDecode decodes from the bitcoin wire
// protocol as it was sent across the network. The wire encoding can
// technically differ depending on the protocol version and doesn't even really
// need to match the format of a stored filter header at all. As of the time
// this comment was written, the encoded filter header is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgCFHeaders) Deserialize(r io.Reader) error {
	// At the current time, there is no difference between the wire encoding
	// and the stable long-term storage format. As a result, make use of
	// BtcDecode.
	return msg.BtcDecode(r, 0)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgCFHeaders) Command() string {
	return CmdCFHeaders
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgCFHeaders) MaxPayloadLength(pver uint32) uint32 {
	// Filter type + stop hash + prev filter header +
	// num headers (varInt) + (header size * max headers).
	return 1 + daghash.HashSize + daghash.HashSize + MaxVarIntPayload +
		(MaxCFHeaderPayload * MaxCFHeadersPerMsg)
}

// NewMsgCFHeaders returns a new bitcoin cfheaders message that conforms to
// the Message interface. See MsgCFHeaders for details.
func NewMsgCFHeaders() *MsgCFHeaders {
	return &MsgCFHeaders{
		FilterHashes:     make([]*daghash.Hash, 0, MaxCFHeadersPerMsg),
		StopHash:         &daghash.ZeroHash,
		PrevFilterHeader: &daghash.ZeroHash,
	}
}
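
For reference, a minimal sketch of how this message could be round-tripped before the removal. It assumes a checkout prior to this commit, so that github.com/kaspanet/kaspad/wire still exports these symbols; protocol version 0 mirrors what Deserialize itself passes.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// Build a cfheaders message carrying a single (zero) filter hash.
	msg := wire.NewMsgCFHeaders()
	msg.FilterType = wire.GCSFilterRegular
	if err := msg.AddCFHash(&daghash.ZeroHash); err != nil {
		log.Fatal(err)
	}

	// Encode to the wire format, then decode into a fresh message.
	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, 0); err != nil {
		log.Fatal(err)
	}
	decoded := wire.MsgCFHeaders{}
	if err := decoded.BtcDecode(&buf, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println("filter hashes after round trip:", len(decoded.FilterHashes))
}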
@ -1,122 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
	"fmt"
	"io"

	"github.com/kaspanet/kaspad/util/daghash"
)

// FilterType is used to represent a filter type.
type FilterType uint8

const (
	// GCSFilterRegular is the regular filter type.
	GCSFilterRegular FilterType = iota

	// GCSFilterExtended is the extended filter type.
	GCSFilterExtended
)

const (
	// MaxCFilterDataSize is the maximum byte size of a committed filter.
	// The maximum size is currently defined as 256KiB.
	MaxCFilterDataSize = 256 * 1024
)

// MsgCFilter implements the Message interface and represents a bitcoin cfilter
// message. It is used to deliver a committed filter in response to a
// getcfilters (MsgGetCFilters) message.
type MsgCFilter struct {
	FilterType FilterType
	BlockHash  daghash.Hash
	Data       []byte
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgCFilter) BtcDecode(r io.Reader, pver uint32) error {
	// Read filter type
	err := ReadElement(r, &msg.FilterType)
	if err != nil {
		return err
	}

	// Read the hash of the filter's block
	err = ReadElement(r, &msg.BlockHash)
	if err != nil {
		return err
	}

	// Read filter data
	msg.Data, err = ReadVarBytes(r, pver, MaxCFilterDataSize,
		"cfilter data")
	return err
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgCFilter) BtcEncode(w io.Writer, pver uint32) error {
	size := len(msg.Data)
	if size > MaxCFilterDataSize {
		str := fmt.Sprintf("cfilter size too large for message "+
			"[size %d, max %d]", size, MaxCFilterDataSize)
		return messageError("MsgCFilter.BtcEncode", str)
	}

	err := WriteElement(w, msg.FilterType)
	if err != nil {
		return err
	}

	err = WriteElement(w, msg.BlockHash)
	if err != nil {
		return err
	}

	return WriteVarBytes(w, pver, msg.Data)
}

// Deserialize decodes a filter from r into the receiver using a format that is
// suitable for long-term storage such as a database. This function differs
// from BtcDecode in that BtcDecode decodes from the bitcoin wire protocol as
// it was sent across the network. The wire encoding can technically differ
// depending on the protocol version and doesn't even really need to match the
// format of a stored filter at all. As of the time this comment was written,
// the encoded filter is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgCFilter) Deserialize(r io.Reader) error {
	// At the current time, there is no difference between the wire encoding
	// and the stable long-term storage format. As a result, make use of
	// BtcDecode.
	return msg.BtcDecode(r, 0)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgCFilter) Command() string {
	return CmdCFilter
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgCFilter) MaxPayloadLength(pver uint32) uint32 {
	return uint32(VarIntSerializeSize(MaxCFilterDataSize)) +
		MaxCFilterDataSize + daghash.HashSize + 1
}

// NewMsgCFilter returns a new bitcoin cfilter message that conforms to the
// Message interface. See MsgCFilter for details.
func NewMsgCFilter(filterType FilterType, blockHash *daghash.Hash,
	data []byte) *MsgCFilter {

	return &MsgCFilter{
		FilterType: filterType,
		BlockHash:  *blockHash,
		Data:       data,
	}
}
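
Similarly, a hedged sketch of serializing a cfilter message under the same pre-removal assumption. The filterData bytes here are a stand-in for illustration; a real filter would come from the gcs builder package.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// filterData stands in for a serialized GCS filter.
	filterData := []byte{0x01, 0x02, 0x03}

	msg := wire.NewMsgCFilter(wire.GCSFilterExtended, &daghash.ZeroHash, filterData)

	// BtcEncode would reject a filter larger than MaxCFilterDataSize.
	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d bytes\n", buf.Len())
}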
@ -1,65 +0,0 @@
// Copyright (c) 2018 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
	"io"

	"github.com/kaspanet/kaspad/util/daghash"
)

// MsgGetCFCheckpt is a request for filter headers at evenly spaced intervals
// throughout the blockchain history. It allows setting the FilterType field
// to request headers from the chain of basic (0x00) or extended (0x01)
// headers.
type MsgGetCFCheckpt struct {
	FilterType FilterType
	StopHash   *daghash.Hash
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgGetCFCheckpt) BtcDecode(r io.Reader, pver uint32) error {
	err := ReadElement(r, &msg.FilterType)
	if err != nil {
		return err
	}

	msg.StopHash = &daghash.Hash{}
	return ReadElement(r, msg.StopHash)
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgGetCFCheckpt) BtcEncode(w io.Writer, pver uint32) error {
	err := WriteElement(w, msg.FilterType)
	if err != nil {
		return err
	}

	return WriteElement(w, msg.StopHash)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgGetCFCheckpt) Command() string {
	return CmdGetCFCheckpt
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgGetCFCheckpt) MaxPayloadLength(pver uint32) uint32 {
	// Filter type + block hash
	return 1 + daghash.HashSize
}

// NewMsgGetCFCheckpt returns a new bitcoin getcfcheckpt message that conforms
// to the Message interface using the passed parameters and defaults for the
// remaining fields.
func NewMsgGetCFCheckpt(filterType FilterType, stopHash *daghash.Hash) *MsgGetCFCheckpt {
	return &MsgGetCFCheckpt{
		FilterType: filterType,
		StopHash:   stopHash,
	}
}
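
A minimal sketch of issuing this request, again assuming a pre-removal checkout. The zero hash is used purely for illustration; a real caller would pass a block hash from its own DAG view.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	msg := wire.NewMsgGetCFCheckpt(wire.GCSFilterRegular, &daghash.ZeroHash)

	// The payload is fixed-size: one filter-type byte plus one block hash.
	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len() == 1+daghash.HashSize) // true
}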
@ -1,78 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
	"io"

	"github.com/kaspanet/kaspad/util/daghash"
)

// MsgGetCFHeaders is a message similar to MsgGetHeaders, but for committed
// filter headers. It allows setting the FilterType field to request headers
// from the chain of basic (0x00) or extended (0x01) headers.
type MsgGetCFHeaders struct {
	FilterType  FilterType
	StartHeight uint64
	StopHash    *daghash.Hash
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgGetCFHeaders) BtcDecode(r io.Reader, pver uint32) error {
	err := ReadElement(r, &msg.FilterType)
	if err != nil {
		return err
	}

	err = ReadElement(r, &msg.StartHeight)
	if err != nil {
		return err
	}

	msg.StopHash = &daghash.Hash{}
	return ReadElement(r, msg.StopHash)
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgGetCFHeaders) BtcEncode(w io.Writer, pver uint32) error {
	err := WriteElement(w, msg.FilterType)
	if err != nil {
		return err
	}

	err = WriteElement(w, &msg.StartHeight)
	if err != nil {
		return err
	}

	return WriteElement(w, msg.StopHash)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgGetCFHeaders) Command() string {
	return CmdGetCFHeaders
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgGetCFHeaders) MaxPayloadLength(pver uint32) uint32 {
	// Filter type + uint64 + block hash
	return 1 + 8 + daghash.HashSize
}

// NewMsgGetCFHeaders returns a new bitcoin getcfheaders message that conforms
// to the Message interface using the passed parameters and defaults for the
// remaining fields.
func NewMsgGetCFHeaders(filterType FilterType, startHeight uint64,
	stopHash *daghash.Hash) *MsgGetCFHeaders {

	return &MsgGetCFHeaders{
		FilterType:  filterType,
		StartHeight: startHeight,
		StopHash:    stopHash,
	}
}
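
As a sketch under the same pre-removal assumption, requesting extended filter headers from a given start height; the height and the zero stop hash are illustrative values only.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// Request extended filter headers starting at height 1000 and ending
	// at the block identified by the stop hash.
	stopHash := &daghash.ZeroHash
	msg := wire.NewMsgGetCFHeaders(wire.GCSFilterExtended, 1000, stopHash)

	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, 0); err != nil {
		log.Fatal(err)
	}
	// Filter type (1) + start height (8) + stop hash (32) = 41 bytes.
	fmt.Println(buf.Len())
}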
@ -1,82 +0,0 @@
// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
	"io"

	"github.com/kaspanet/kaspad/util/daghash"
)

// MaxGetCFiltersReqRange is the maximum number of filters that may be
// requested in a getcfilters message.
const MaxGetCFiltersReqRange = 1000

// MsgGetCFilters implements the Message interface and represents a bitcoin
// getcfilters message. It is used to request committed filters for a range of
// blocks.
type MsgGetCFilters struct {
	FilterType  FilterType
	StartHeight uint64
	StopHash    *daghash.Hash
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgGetCFilters) BtcDecode(r io.Reader, pver uint32) error {
	err := ReadElement(r, &msg.FilterType)
	if err != nil {
		return err
	}

	err = ReadElement(r, &msg.StartHeight)
	if err != nil {
		return err
	}

	msg.StopHash = &daghash.Hash{}
	return ReadElement(r, msg.StopHash)
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgGetCFilters) BtcEncode(w io.Writer, pver uint32) error {
	err := WriteElement(w, msg.FilterType)
	if err != nil {
		return err
	}

	err = WriteElement(w, &msg.StartHeight)
	if err != nil {
		return err
	}

	return WriteElement(w, msg.StopHash)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgGetCFilters) Command() string {
	return CmdGetCFilters
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgGetCFilters) MaxPayloadLength(pver uint32) uint32 {
	// Filter type + uint64 + block hash
	return 1 + 8 + daghash.HashSize
}

// NewMsgGetCFilters returns a new bitcoin getcfilters message that conforms to
// the Message interface using the passed parameters and defaults for the
// remaining fields.
func NewMsgGetCFilters(filterType FilterType, startHeight uint64,
	stopHash *daghash.Hash) *MsgGetCFilters {

	return &MsgGetCFilters{
		FilterType:  filterType,
		StartHeight: startHeight,
		StopHash:    stopHash,
	}
}
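
A final sketch for the getcfilters request, showing where MaxGetCFiltersReqRange would constrain a well-behaved requester. Note the message itself carries a stop hash, not a stop height, so the height bound below is a caller-side assumption for illustration.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/kaspanet/kaspad/util/daghash"
	"github.com/kaspanet/kaspad/wire"
)

func main() {
	// Illustrative height bounds; a well-behaved peer keeps the span
	// within MaxGetCFiltersReqRange before sending the request.
	startHeight := uint64(2000)
	stopHeight := uint64(2999)
	if stopHeight-startHeight+1 > wire.MaxGetCFiltersReqRange {
		log.Fatal("requested range exceeds MaxGetCFiltersReqRange")
	}

	msg := wire.NewMsgGetCFilters(wire.GCSFilterRegular, startHeight, &daghash.ZeroHash)

	var buf bytes.Buffer
	if err := msg.BtcEncode(&buf, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println("encoded getcfilters request:", buf.Len(), "bytes")
}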