Mirror of https://github.com/kaspanet/kaspad.git (synced 2026-02-22 11:39:15 +00:00)

Compare commits: v0.10.0-al ... github-dep (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 7de07cdc97 | |
| | 921ca19b42 | |
| | 98c2dc8189 | |
| | 37654156a6 | |
.github/workflows/go-deploy.yml (vendored): 31 changed lines
@@ -37,33 +37,24 @@ jobs:
# `-extldflags=-static` - means static link everything, `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net"
# `-s -w` strips the binary to produce smaller size binaries
run: |
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-linux.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-linux.zip"
zip -r "${archive}" ./bin/*
echo "archive=${archive}" >> $GITHUB_ENV
echo "asset_name=${asset_name}" >> $GITHUB_ENV
binary="kaspad-${{ github.event.release.tag_name }}-linux"
echo "binary=${binary}" >> $GITHUB_ENV
go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o "${binary}"

- name: Build on Windows
if: runner.os == 'Windows'
shell: bash
run: |
go build -v -ldflags="-s -w" -o bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-win64.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-win64.zip"
powershell "Compress-Archive bin/* \"${archive}\""
echo "archive=${archive}" >> $GITHUB_ENV
echo "asset_name=${asset_name}" >> $GITHUB_ENV
binary="kaspad-${{ github.event.release.tag_name }}-win64.exe"
echo "binary=${binary}" >> $GITHUB_ENV
go build -v -ldflags="-s -w" -o "${binary}"

- name: Build on MacOS
if: runner.os == 'macOS'
run: |
go build -v -ldflags="-s -w" -o ./bin/ ./...
archive="bin/kaspad-${{ github.event.release.tag_name }}-osx.zip"
asset_name="kaspad-${{ github.event.release.tag_name }}-osx.zip"
zip -r "${archive}" ./bin/*
echo "archive=${archive}" >> $GITHUB_ENV
echo "asset_name=${asset_name}" >> $GITHUB_ENV
binary="kaspad-${{ github.event.release.tag_name }}-osx"
echo "binary=${binary}" >> $GITHUB_ENV
go build -v -ldflags="-s -w" -o "${binary}"

- name: Upload Release Asset
@@ -72,6 +63,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: "./${{ env.archive }}"
asset_name: "${{ env.asset_name }}"
asset_path: "./${{ env.binary }}"
asset_name: "${{ env.binary }}"
asset_content_type: application/zip

.github/workflows/go.yml (vendored): 3 changed lines
@@ -63,9 +63,6 @@ jobs:
with:
go-version: 1.16

- name: Delete the stability tests from coverage
run: rm -r stability-tests

- name: Create coverage file
run: go test -v -covermode=atomic -coverpkg=./... -coverprofile coverage.txt ./...

@@ -56,7 +56,7 @@ $ kaspad
```

## Discord
Join our discord server using the following link: https://discord.gg/YNYnNN5Pf2
Join our discord server using the following link: https://discord.gg/WmGhhzk

## Issue Tracker

@@ -171,7 +171,7 @@ func doUpgrades() error {

// dbPath returns the path to the block database given a database type.
func databasePath(cfg *config.Config) string {
return filepath.Join(cfg.AppDir, "data")
return filepath.Join(cfg.DataDir, "db")
}

func removeDatabase(cfg *config.Config) error {

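The hunk above swaps the database location between `<AppDir>/data` and `<DataDir>/db` (which side is old depends on the direction of this compare). A minimal sketch of the two path computations, using a trimmed-down stand-in for config.Config and illustrative example directories:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// config is a trimmed-down stand-in for kaspad's config.Config; only the
// two directory fields referenced in the hunk are reproduced here.
type config struct {
	AppDir  string
	DataDir string
}

func main() {
	cfg := &config{AppDir: "/home/user/.kaspad", DataDir: "/home/user/.kaspad/kaspa-mainnet"}

	// The two variants that appear in the hunk; which one is the old and
	// which the new side depends on the compare direction of this page.
	fmt.Println(filepath.Join(cfg.AppDir, "data")) // /home/user/.kaspad/data
	fmt.Println(filepath.Join(cfg.DataDir, "db"))  // /home/user/.kaspad/kaspa-mainnet/db
}
```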
@@ -3,7 +3,6 @@ package appmessage
|
||||
import (
|
||||
"encoding/hex"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/blockheader"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
@@ -84,6 +83,7 @@ func DomainTransactionToMsgTx(domainTransaction *externalapi.DomainTransaction)
|
||||
LockTime: domainTransaction.LockTime,
|
||||
SubnetworkID: domainTransaction.SubnetworkID,
|
||||
Gas: domainTransaction.Gas,
|
||||
PayloadHash: domainTransaction.PayloadHash,
|
||||
Payload: domainTransaction.Payload,
|
||||
}
|
||||
}
|
||||
@@ -133,6 +133,7 @@ func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction {
|
||||
LockTime: msgTx.LockTime,
|
||||
SubnetworkID: msgTx.SubnetworkID,
|
||||
Gas: msgTx.Gas,
|
||||
PayloadHash: msgTx.PayloadHash,
|
||||
Payload: payload,
|
||||
}
|
||||
}
|
||||
@@ -197,6 +198,10 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHash, err := externalapi.NewDomainHashFromString(rpcTransaction.PayloadHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload, err := hex.DecodeString(rpcTransaction.Payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -209,6 +214,7 @@ func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externa
|
||||
LockTime: rpcTransaction.LockTime,
|
||||
SubnetworkID: *subnetworkID,
|
||||
Gas: rpcTransaction.LockTime,
|
||||
PayloadHash: *payloadHash,
|
||||
Payload: payload,
|
||||
}, nil
|
||||
}
|
||||
@@ -238,6 +244,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
||||
}
|
||||
}
|
||||
subnetworkID := transaction.SubnetworkID.String()
|
||||
payloadHash := transaction.PayloadHash.String()
|
||||
payload := hex.EncodeToString(transaction.Payload)
|
||||
return &RPCTransaction{
|
||||
Version: transaction.Version,
|
||||
@@ -246,6 +253,7 @@ func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransactio
|
||||
LockTime: transaction.LockTime,
|
||||
SubnetworkID: subnetworkID,
|
||||
Gas: transaction.LockTime,
|
||||
PayloadHash: payloadHash,
|
||||
Payload: payload,
|
||||
}
|
||||
}
|
||||
@@ -266,7 +274,7 @@ func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(
|
||||
outpointAndUTXOEntryPair.UTXOEntry.Amount,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore,
|
||||
outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore,
|
||||
),
|
||||
}
|
||||
}
|
||||
@@ -289,76 +297,9 @@ func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(
|
||||
Amount: outpointAndUTXOEntryPair.UTXOEntry.Amount(),
|
||||
ScriptPublicKey: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey(),
|
||||
IsCoinbase: outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase(),
|
||||
BlockDAAScore: outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore(),
|
||||
BlockBlueScore: outpointAndUTXOEntryPair.UTXOEntry.BlockBlueScore(),
|
||||
},
|
||||
}
|
||||
}
|
||||
return domainOutpointAndUTXOEntryPairs
|
||||
}
|
||||
|
||||
// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks
|
||||
func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock {
|
||||
header := &RPCBlockHeader{
|
||||
Version: uint32(block.Header.Version()),
|
||||
ParentHashes: hashes.ToStrings(block.Header.ParentHashes()),
|
||||
HashMerkleRoot: block.Header.HashMerkleRoot().String(),
|
||||
AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(),
|
||||
UTXOCommitment: block.Header.UTXOCommitment().String(),
|
||||
Timestamp: block.Header.TimeInMilliseconds(),
|
||||
Bits: block.Header.Bits(),
|
||||
Nonce: block.Header.Nonce(),
|
||||
}
|
||||
transactions := make([]*RPCTransaction, len(block.Transactions))
|
||||
for i, transaction := range block.Transactions {
|
||||
transactions[i] = DomainTransactionToRPCTransaction(transaction)
|
||||
}
|
||||
return &RPCBlock{
|
||||
Header: header,
|
||||
Transactions: transactions,
|
||||
}
|
||||
}
|
||||
|
||||
// RPCBlockToDomainBlock converts `block` into a DomainBlock
|
||||
func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) {
|
||||
parentHashes := make([]*externalapi.DomainHash, len(block.Header.ParentHashes))
|
||||
for i, parentHash := range block.Header.ParentHashes {
|
||||
domainParentHashes, err := externalapi.NewDomainHashFromString(parentHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentHashes[i] = domainParentHashes
|
||||
}
|
||||
hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
acceptedIDMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.AcceptedIDMerkleRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utxoCommitment, err := externalapi.NewDomainHashFromString(block.Header.UTXOCommitment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header := blockheader.NewImmutableBlockHeader(
|
||||
uint16(block.Header.Version),
|
||||
parentHashes,
|
||||
hashMerkleRoot,
|
||||
acceptedIDMerkleRoot,
|
||||
utxoCommitment,
|
||||
block.Header.Timestamp,
|
||||
block.Header.Bits,
|
||||
block.Header.Nonce)
|
||||
transactions := make([]*externalapi.DomainTransaction, len(block.Transactions))
|
||||
for i, transaction := range block.Transactions {
|
||||
domainTransaction, err := RPCTransactionToDomainTransaction(transaction)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions[i] = domainTransaction
|
||||
}
|
||||
return &externalapi.DomainBlock{
|
||||
Header: header,
|
||||
Transactions: transactions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -15,6 +15,19 @@ import (
// backing array multiple times.
const defaultTransactionAlloc = 2048

// MaxMassAcceptedByBlock is the maximum total transaction mass a block may accept.
const MaxMassAcceptedByBlock = 10000000

// MaxMassPerTx is the maximum total mass a transaction may have.
const MaxMassPerTx = MaxMassAcceptedByBlock / 2

// MaxTxPerBlock is the maximum number of transactions that could
// possibly fit into a block.
const MaxTxPerBlock = (MaxMassAcceptedByBlock / minTxPayload) + 1

// MaxBlockParents is the maximum allowed number of parents for block.
const MaxBlockParents = 10

// TxLoc holds locator data for the offset and length of where a transaction is
// located within a MsgBlock data buffer.
type TxLoc struct {

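The constants above define a simple mass budget: a transaction may use at most half of a block's total mass, and the transaction count is bounded by the block budget divided by the minimum transaction payload. The sketch below shows how such limits are typically checked; fitsInBlock and the minTxPayload value are assumptions for illustration, not kaspad's actual validation code.

```go
package main

import "fmt"

// Constants copied from the diff above; minTxPayload is not shown in the
// excerpt, so the value here is a placeholder assumption.
const (
	MaxMassAcceptedByBlock = 10000000
	MaxMassPerTx           = MaxMassAcceptedByBlock / 2
	minTxPayload           = 100 // assumption: the real value lives elsewhere in the package
	MaxTxPerBlock          = (MaxMassAcceptedByBlock / minTxPayload) + 1
)

// fitsInBlock is a hedged illustration of how such limits are usually
// consulted: reject a transaction that exceeds the per-transaction cap or
// that would push the block past its total mass budget.
func fitsInBlock(blockMassSoFar, txMass uint64) bool {
	if txMass > MaxMassPerTx {
		return false
	}
	return blockMassSoFar+txMass <= MaxMassAcceptedByBlock
}

func main() {
	fmt.Println(fitsInBlock(9_500_000, 400_000)) // true: stays within the 10,000,000 budget
	fmt.Println(fitsInBlock(9_900_000, 200_000)) // false: exceeds the block budget
}
```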
@@ -31,6 +31,6 @@ type OutpointAndUTXOEntryPair struct {
type UTXOEntry struct {
Amount uint64
ScriptPublicKey *externalapi.ScriptPublicKey
BlockDAAScore uint64
BlockBlueScore uint64
IsCoinbase bool
}

@@ -6,6 +6,7 @@ package appmessage
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
"strconv"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
@@ -132,6 +133,7 @@ type MsgTx struct {
|
||||
LockTime uint64
|
||||
SubnetworkID externalapi.DomainSubnetworkID
|
||||
Gas uint64
|
||||
PayloadHash externalapi.DomainHash
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
@@ -177,6 +179,7 @@ func (msg *MsgTx) Copy() *MsgTx {
|
||||
LockTime: msg.LockTime,
|
||||
SubnetworkID: msg.SubnetworkID,
|
||||
Gas: msg.Gas,
|
||||
PayloadHash: msg.PayloadHash,
|
||||
}
|
||||
|
||||
if msg.Payload != nil {
|
||||
@@ -277,12 +280,18 @@ func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *extern
|
||||
txOut = make([]*TxOut, 0, defaultTxInOutAlloc)
|
||||
}
|
||||
|
||||
var payloadHash externalapi.DomainHash
|
||||
if *subnetworkID != subnetworks.SubnetworkIDNative {
|
||||
payloadHash = *hashes.PayloadHash(payload)
|
||||
}
|
||||
|
||||
return &MsgTx{
|
||||
Version: version,
|
||||
TxIn: txIn,
|
||||
TxOut: txOut,
|
||||
SubnetworkID: *subnetworkID,
|
||||
Gas: gas,
|
||||
PayloadHash: payloadHash,
|
||||
Payload: payload,
|
||||
LockTime: lockTime,
|
||||
}
|
||||
|
||||
@@ -133,8 +133,8 @@ func TestTx(t *testing.T) {
|
||||
|
||||
// TestTxHash tests the ability to generate the hash of a transaction accurately.
|
||||
func TestTxHashAndID(t *testing.T) {
|
||||
txHash1Str := "93663e597f6c968d32d229002f76408edf30d6a0151ff679fc729812d8cb2acc"
|
||||
txID1Str := "24079c6d2bdf602fc389cc307349054937744a9c8dc0f07c023e6af0e949a4e7"
|
||||
txHash1Str := "4bee9ee495bd93a755de428376bd582a2bb6ec37c041753b711c0606d5745c13"
|
||||
txID1Str := "f868bd20e816256b80eac976821be4589d24d21141bd1cec6e8005d0c16c6881"
|
||||
wantTxID1, err := transactionid.FromString(txID1Str)
|
||||
if err != nil {
|
||||
t.Fatalf("NewTxIDFromStr: %v", err)
|
||||
@@ -185,14 +185,14 @@ func TestTxHashAndID(t *testing.T) {
|
||||
spew.Sprint(tx1ID), spew.Sprint(wantTxID1))
|
||||
}
|
||||
|
||||
hash2Str := "8dafd1bec24527d8e3b443ceb0a3b92fffc0d60026317f890b2faf5e9afc177a"
|
||||
hash2Str := "cb1bdb4a83d4885535fb3cceb5c96597b7df903db83f0ffcd779d703affd8efd"
|
||||
wantHash2, err := externalapi.NewDomainHashFromString(hash2Str)
|
||||
if err != nil {
|
||||
t.Errorf("NewTxIDFromStr: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
id2Str := "89ffb49474637502d9059af38b8a95fc2f0d3baef5c801d7a9b9c8830671b711"
|
||||
id2Str := "ca080073d4ddf5b84443a0964af633f3c70a5b290fd3bc35a7e6f93fd33f9330"
|
||||
wantID2, err := transactionid.FromString(id2Str)
|
||||
if err != nil {
|
||||
t.Errorf("NewTxIDFromStr: %v", err)
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestVersion(t *testing.T) {

// Create version message data.
tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 16111}
me := NewNetAddress(tcpAddrMe)
me := NewNetAddress(tcpAddrMe, SFNodeNetwork)
generatedID, err := id.GenerateID()
if err != nil {
t.Fatalf("id.GenerateID: %s", err)

@@ -15,6 +15,9 @@ type NetAddress struct {
// Last time the address was seen.
Timestamp mstime.Time

// Bitfield which identifies the services supported by the address.
Services ServiceFlag

// IP address of the peer.
IP net.IP

@@ -23,6 +26,17 @@ type NetAddress struct {
Port uint16
}

// HasService returns whether the specified service is supported by the address.
func (na *NetAddress) HasService(service ServiceFlag) bool {
return na.Services&service == service
}

// AddService adds service as a supported service by the peer generating the
// message.
func (na *NetAddress) AddService(service ServiceFlag) {
na.Services |= service
}

// TCPAddress converts the NetAddress to *net.TCPAddr
func (na *NetAddress) TCPAddress() *net.TCPAddr {
return &net.TCPAddr{
@@ -33,19 +47,20 @@ func (na *NetAddress) TCPAddress() *net.TCPAddr {

// NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and
// supported services with defaults for the remaining fields.
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
return NewNetAddressTimestamp(mstime.Now(), ip, port)
func NewNetAddressIPPort(ip net.IP, port uint16, services ServiceFlag) *NetAddress {
return NewNetAddressTimestamp(mstime.Now(), services, ip, port)
}

// NewNetAddressTimestamp returns a new NetAddress using the provided
// timestamp, IP, port, and supported services. The timestamp is rounded to
// single millisecond precision.
func NewNetAddressTimestamp(
timestamp mstime.Time, ip net.IP, port uint16) *NetAddress {
timestamp mstime.Time, services ServiceFlag, ip net.IP, port uint16) *NetAddress {
// Limit the timestamp to one millisecond precision since the protocol
// doesn't support better.
na := NetAddress{
Timestamp: timestamp,
Services: services,
IP: ip,
Port: port,
}

@@ -54,6 +69,6 @@ func NewNetAddressTimestamp(

// NewNetAddress returns a new NetAddress using the provided TCP address and
// supported services with defaults for the remaining fields.
func NewNetAddress(addr *net.TCPAddr) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port))
func NewNetAddress(addr *net.TCPAddr, services ServiceFlag) *NetAddress {
return NewNetAddressIPPort(addr.IP, uint16(addr.Port), services)
}

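The Services field added above is a plain bitfield, and HasService/AddService are the usual mask operations. A self-contained sketch of that behavior follows; the ServiceFlag type and the SFNodeNetwork value are reproduced here only for illustration and are assumptions about the surrounding appmessage package.

```go
package main

import "fmt"

// ServiceFlag mirrors the bitfield used by NetAddress.Services in the diff
// above; the concrete bit value here is an illustrative assumption.
type ServiceFlag uint64

const (
	// SFNodeNetwork marks a full node that serves the whole network.
	SFNodeNetwork ServiceFlag = 1 << 0
)

// netAddress is a stripped-down stand-in for appmessage.NetAddress.
type netAddress struct {
	Services ServiceFlag
}

// HasService reports whether every bit of `service` is set, matching the
// `na.Services&service == service` check added in the diff.
func (na *netAddress) HasService(service ServiceFlag) bool {
	return na.Services&service == service
}

// AddService sets the service bits, matching `na.Services |= service`.
func (na *netAddress) AddService(service ServiceFlag) {
	na.Services |= service
}

func main() {
	na := &netAddress{}
	fmt.Println(na.HasService(SFNodeNetwork)) // false
	na.AddService(SFNodeNetwork)
	fmt.Println(na.HasService(SFNodeNetwork)) // true
}
```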
@@ -15,7 +15,7 @@ func TestNetAddress(t *testing.T) {
port := 16111

// Test NewNetAddress.
na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port})
na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port}, 0)

// Ensure we get the same ip, port, and services back out.
if !na.IP.Equal(ip) {
@@ -25,4 +25,21 @@ func TestNetAddress(t *testing.T) {
t.Errorf("NetNetAddress: wrong port - got %v, want %v", na.Port,
port)
}
if na.Services != 0 {
t.Errorf("NetNetAddress: wrong services - got %v, want %v",
na.Services, 0)
}
if na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service is set")
}

// Ensure adding the full service node flag works.
na.AddService(SFNodeNetwork)
if na.Services != SFNodeNetwork {
t.Errorf("AddService: wrong services - got %v, want %v",
na.Services, SFNodeNetwork)
}
if !na.HasService(SFNodeNetwork) {
t.Errorf("HasService: SFNodeNetwork service not set")
}
}

@@ -25,7 +25,7 @@ func NewGetBlockRequestMessage(hash string, includeTransactionVerboseData bool)
|
||||
// its respective RPC message
|
||||
type GetBlockResponseMessage struct {
|
||||
baseMessage
|
||||
Block *RPCBlock
|
||||
BlockVerboseData *BlockVerboseData
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
@@ -39,3 +39,71 @@ func (msg *GetBlockResponseMessage) Command() MessageCommand {
|
||||
func NewGetBlockResponseMessage() *GetBlockResponseMessage {
|
||||
return &GetBlockResponseMessage{}
|
||||
}
|
||||
|
||||
// BlockVerboseData holds verbose data about a block
|
||||
type BlockVerboseData struct {
|
||||
Hash string
|
||||
Version uint16
|
||||
VersionHex string
|
||||
HashMerkleRoot string
|
||||
AcceptedIDMerkleRoot string
|
||||
UTXOCommitment string
|
||||
TxIDs []string
|
||||
TransactionVerboseData []*TransactionVerboseData
|
||||
Time int64
|
||||
Nonce uint64
|
||||
Bits string
|
||||
Difficulty float64
|
||||
ParentHashes []string
|
||||
ChildrenHashes []string
|
||||
SelectedParentHash string
|
||||
BlueScore uint64
|
||||
IsHeaderOnly bool
|
||||
}
|
||||
|
||||
// TransactionVerboseData holds verbose data about a transaction
|
||||
type TransactionVerboseData struct {
|
||||
TxID string
|
||||
Hash string
|
||||
Size uint64
|
||||
Version uint16
|
||||
LockTime uint64
|
||||
SubnetworkID string
|
||||
Gas uint64
|
||||
PayloadHash string
|
||||
Payload string
|
||||
TransactionVerboseInputs []*TransactionVerboseInput
|
||||
TransactionVerboseOutputs []*TransactionVerboseOutput
|
||||
BlockHash string
|
||||
Time uint64
|
||||
BlockTime uint64
|
||||
}
|
||||
|
||||
// TransactionVerboseInput holds data about a transaction input
|
||||
type TransactionVerboseInput struct {
|
||||
TxID string
|
||||
OutputIndex uint32
|
||||
ScriptSig *ScriptSig
|
||||
Sequence uint64
|
||||
}
|
||||
|
||||
// ScriptSig holds data about a script signature
|
||||
type ScriptSig struct {
|
||||
Asm string
|
||||
Hex string
|
||||
}
|
||||
|
||||
// TransactionVerboseOutput holds data about a transaction output
|
||||
type TransactionVerboseOutput struct {
|
||||
Value uint64
|
||||
Index uint32
|
||||
ScriptPubKey *ScriptPubKeyResult
|
||||
}
|
||||
|
||||
// ScriptPubKeyResult holds data about a script public key
|
||||
type ScriptPubKeyResult struct {
|
||||
Hex string
|
||||
Type string
|
||||
Address string
|
||||
Version uint16
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ func NewGetBlockTemplateRequestMessage(payAddress string) *GetBlockTemplateReque
|
||||
// its respective RPC message
|
||||
type GetBlockTemplateResponseMessage struct {
|
||||
baseMessage
|
||||
Block *RPCBlock
|
||||
MsgBlock *MsgBlock
|
||||
IsSynced bool
|
||||
|
||||
Error *RPCError
|
||||
@@ -35,9 +35,9 @@ func (msg *GetBlockTemplateResponseMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetBlockTemplateResponseMessage returns a instance of the message
|
||||
func NewGetBlockTemplateResponseMessage(block *RPCBlock, isSynced bool) *GetBlockTemplateResponseMessage {
|
||||
func NewGetBlockTemplateResponseMessage(msgBlock *MsgBlock, isSynced bool) *GetBlockTemplateResponseMessage {
|
||||
return &GetBlockTemplateResponseMessage{
|
||||
Block: block,
|
||||
MsgBlock: msgBlock,
|
||||
IsSynced: isSynced,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ package appmessage
|
||||
type GetBlocksRequestMessage struct {
|
||||
baseMessage
|
||||
LowHash string
|
||||
IncludeBlocks bool
|
||||
IncludeBlockVerboseData bool
|
||||
IncludeTransactionVerboseData bool
|
||||
}
|
||||
|
||||
@@ -15,11 +15,11 @@ func (msg *GetBlocksRequestMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetBlocksRequestMessage returns a instance of the message
|
||||
func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
|
||||
func NewGetBlocksRequestMessage(lowHash string, includeBlockVerboseData bool,
|
||||
includeTransactionVerboseData bool) *GetBlocksRequestMessage {
|
||||
return &GetBlocksRequestMessage{
|
||||
LowHash: lowHash,
|
||||
IncludeBlocks: includeBlocks,
|
||||
IncludeBlockVerboseData: includeBlockVerboseData,
|
||||
IncludeTransactionVerboseData: includeTransactionVerboseData,
|
||||
}
|
||||
}
|
||||
@@ -28,8 +28,8 @@ func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool,
|
||||
// its respective RPC message
|
||||
type GetBlocksResponseMessage struct {
|
||||
baseMessage
|
||||
BlockHashes []string
|
||||
Blocks []*RPCBlock
|
||||
BlockHashes []string
|
||||
BlockVerboseData []*BlockVerboseData
|
||||
|
||||
Error *RPCError
|
||||
}
|
||||
@@ -40,6 +40,11 @@ func (msg *GetBlocksResponseMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetBlocksResponseMessage returns a instance of the message
|
||||
func NewGetBlocksResponseMessage() *GetBlocksResponseMessage {
|
||||
return &GetBlocksResponseMessage{}
|
||||
func NewGetBlocksResponseMessage(blockHashes []string, blockHexes []string,
|
||||
blockVerboseData []*BlockVerboseData) *GetBlocksResponseMessage {
|
||||
|
||||
return &GetBlocksResponseMessage{
|
||||
BlockHashes: blockHashes,
|
||||
BlockVerboseData: blockVerboseData,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,8 +20,7 @@ func NewGeInfoRequestMessage() *GetInfoRequestMessage {
// its respective RPC message
type GetInfoResponseMessage struct {
baseMessage
P2PID string
MempoolSize uint64
P2PID string

Error *RPCError
}
@@ -32,9 +31,8 @@ func (msg *GetInfoResponseMessage) Command() MessageCommand {
}

// NewGetInfoResponseMessage returns a instance of the message
func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64) *GetInfoResponseMessage {
func NewGetInfoResponseMessage(p2pID string) *GetInfoResponseMessage {
return &GetInfoResponseMessage{
P2PID: p2pID,
MempoolSize: mempoolSize,
P2PID: p2pID,
}
}

@@ -28,8 +28,8 @@ type GetMempoolEntryResponseMessage struct {
|
||||
|
||||
// MempoolEntry represents a transaction in the mempool.
|
||||
type MempoolEntry struct {
|
||||
Fee uint64
|
||||
Transaction *RPCTransaction
|
||||
Fee uint64
|
||||
TransactionVerboseData *TransactionVerboseData
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
@@ -38,11 +38,11 @@ func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewGetMempoolEntryResponseMessage returns a instance of the message
|
||||
func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction) *GetMempoolEntryResponseMessage {
|
||||
func NewGetMempoolEntryResponseMessage(fee uint64, transactionVerboseData *TransactionVerboseData) *GetMempoolEntryResponseMessage {
|
||||
return &GetMempoolEntryResponseMessage{
|
||||
Entry: &MempoolEntry{
|
||||
Fee: fee,
|
||||
Transaction: transaction,
|
||||
Fee: fee,
|
||||
TransactionVerboseData: transactionVerboseData,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,8 @@ func NewNotifyBlockAddedResponseMessage() *NotifyBlockAddedResponseMessage {
|
||||
// its respective RPC message
|
||||
type BlockAddedNotificationMessage struct {
|
||||
baseMessage
|
||||
Block *RPCBlock
|
||||
Block *MsgBlock
|
||||
BlockVerboseData *BlockVerboseData
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
@@ -46,8 +47,9 @@ func (msg *BlockAddedNotificationMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewBlockAddedNotificationMessage returns a instance of the message
|
||||
func NewBlockAddedNotificationMessage(block *RPCBlock) *BlockAddedNotificationMessage {
|
||||
func NewBlockAddedNotificationMessage(block *MsgBlock, blockVerboseData *BlockVerboseData) *BlockAddedNotificationMessage {
|
||||
return &BlockAddedNotificationMessage{
|
||||
Block: block,
|
||||
Block: block,
|
||||
BlockVerboseData: blockVerboseData,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ package appmessage
|
||||
// its respective RPC message
|
||||
type SubmitBlockRequestMessage struct {
|
||||
baseMessage
|
||||
Block *RPCBlock
|
||||
Block *MsgBlock
|
||||
}
|
||||
|
||||
// Command returns the protocol command string for the message
|
||||
@@ -13,7 +13,7 @@ func (msg *SubmitBlockRequestMessage) Command() MessageCommand {
|
||||
}
|
||||
|
||||
// NewSubmitBlockRequestMessage returns a instance of the message
|
||||
func NewSubmitBlockRequestMessage(block *RPCBlock) *SubmitBlockRequestMessage {
|
||||
func NewSubmitBlockRequestMessage(block *MsgBlock) *SubmitBlockRequestMessage {
|
||||
return &SubmitBlockRequestMessage{
|
||||
Block: block,
|
||||
}
|
||||
@@ -57,35 +57,3 @@ func (msg *SubmitBlockResponseMessage) Command() MessageCommand {
|
||||
func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage {
|
||||
return &SubmitBlockResponseMessage{}
|
||||
}
|
||||
|
||||
// RPCBlock is a kaspad block representation meant to be
|
||||
// used over RPC
|
||||
type RPCBlock struct {
|
||||
Header *RPCBlockHeader
|
||||
Transactions []*RPCTransaction
|
||||
VerboseData *RPCBlockVerboseData
|
||||
}
|
||||
|
||||
// RPCBlockHeader is a kaspad block header representation meant to be
|
||||
// used over RPC
|
||||
type RPCBlockHeader struct {
|
||||
Version uint32
|
||||
ParentHashes []string
|
||||
HashMerkleRoot string
|
||||
AcceptedIDMerkleRoot string
|
||||
UTXOCommitment string
|
||||
Timestamp int64
|
||||
Bits uint32
|
||||
Nonce uint64
|
||||
}
|
||||
|
||||
// RPCBlockVerboseData holds verbose data about a block
|
||||
type RPCBlockVerboseData struct {
|
||||
Hash string
|
||||
Difficulty float64
|
||||
SelectedParentHash string
|
||||
TransactionIDs []string
|
||||
IsHeaderOnly bool
|
||||
BlueScore uint64
|
||||
ChildrenHashes []string
|
||||
}
|
||||
|
||||
@@ -49,8 +49,8 @@ type RPCTransaction struct {
|
||||
LockTime uint64
|
||||
SubnetworkID string
|
||||
Gas uint64
|
||||
PayloadHash string
|
||||
Payload string
|
||||
VerboseData *RPCTransactionVerboseData
|
||||
}
|
||||
|
||||
// RPCTransactionInput is a kaspad transaction input representation
|
||||
@@ -59,7 +59,6 @@ type RPCTransactionInput struct {
|
||||
PreviousOutpoint *RPCOutpoint
|
||||
SignatureScript string
|
||||
Sequence uint64
|
||||
VerboseData *RPCTransactionInputVerboseData
|
||||
}
|
||||
|
||||
// RPCScriptPublicKey is a kaspad ScriptPublicKey representation
|
||||
@@ -73,7 +72,6 @@ type RPCScriptPublicKey struct {
|
||||
type RPCTransactionOutput struct {
|
||||
Amount uint64
|
||||
ScriptPublicKey *RPCScriptPublicKey
|
||||
VerboseData *RPCTransactionOutputVerboseData
|
||||
}
|
||||
|
||||
// RPCOutpoint is a kaspad outpoint representation meant to be used
|
||||
@@ -88,25 +86,6 @@ type RPCOutpoint struct {
|
||||
type RPCUTXOEntry struct {
|
||||
Amount uint64
|
||||
ScriptPublicKey *RPCScriptPublicKey
|
||||
BlockDAAScore uint64
|
||||
BlockBlueScore uint64
|
||||
IsCoinbase bool
|
||||
}
|
||||
|
||||
// RPCTransactionVerboseData holds verbose data about a transaction
|
||||
type RPCTransactionVerboseData struct {
|
||||
TransactionID string
|
||||
Hash string
|
||||
Size uint64
|
||||
BlockHash string
|
||||
BlockTime uint64
|
||||
}
|
||||
|
||||
// RPCTransactionInputVerboseData holds data about a transaction input
|
||||
type RPCTransactionInputVerboseData struct {
|
||||
}
|
||||
|
||||
// RPCTransactionOutputVerboseData holds data about a transaction output
|
||||
type RPCTransactionOutputVerboseData struct {
|
||||
ScriptPublicKeyType string
|
||||
ScriptPublicKeyAddress string
|
||||
}
|
||||
|
||||
@@ -72,8 +72,6 @@ func (a *ComponentManager) Stop() {
|
||||
log.Errorf("Error stopping the net adapter: %+v", err)
|
||||
}
|
||||
|
||||
a.protocolManager.Close()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -157,7 +155,7 @@ func setupRPC(
|
||||
|
||||
func (a *ComponentManager) maybeSeedFromDNS() {
|
||||
if !a.cfg.DisableDNSSeed {
|
||||
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, false, nil,
|
||||
dnsseed.SeedFromDNS(a.cfg.NetParams(), a.cfg.DNSSeed, appmessage.SFNodeNetwork, false, nil,
|
||||
a.cfg.Lookup, func(addresses []*appmessage.NetAddress) {
|
||||
// Kaspad uses a lookup of the dns seeder here. Since seeder returns
|
||||
// IPs of nodes and not its own IP, we can not know real IP of
|
||||
@@ -165,7 +163,7 @@ func (a *ComponentManager) maybeSeedFromDNS() {
|
||||
a.addressManager.AddAddresses(addresses...)
|
||||
})
|
||||
|
||||
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, false, nil,
|
||||
dnsseed.SeedFromGRPC(a.cfg.NetParams(), a.cfg.GRPCSeed, appmessage.SFNodeNetwork, false, nil,
|
||||
func(addresses []*appmessage.NetAddress) {
|
||||
a.addressManager.AddAddresses(addresses...)
|
||||
})
|
||||
|
||||
@@ -61,8 +61,6 @@ type FlowContext struct {
|
||||
|
||||
orphans map[externalapi.DomainHash]*externalapi.DomainBlock
|
||||
orphansMutex sync.RWMutex
|
||||
|
||||
shutdownChan chan struct{}
|
||||
}
|
||||
|
||||
// New returns a new instance of FlowContext.
|
||||
@@ -81,21 +79,9 @@ func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanage
|
||||
transactionsToRebroadcast: make(map[externalapi.DomainTransactionID]*externalapi.DomainTransaction),
|
||||
orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
|
||||
timeStarted: mstime.Now().UnixMilliseconds(),
|
||||
shutdownChan: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Close signals to all flows the the protocol manager is closed.
|
||||
func (f *FlowContext) Close() {
|
||||
close(f.shutdownChan)
|
||||
}
|
||||
|
||||
// ShutdownChan is a chan where flows can subscribe to shutdown
|
||||
// event.
|
||||
func (f *FlowContext) ShutdownChan() <-chan struct{} {
|
||||
return f.shutdownChan
|
||||
}
|
||||
|
||||
// SetOnBlockAddedToDAGHandler sets the onBlockAddedToDAG handler
|
||||
func (f *FlowContext) SetOnBlockAddedToDAGHandler(onBlockAddedToDAGHandler OnBlockAddedToDAGHandler) {
|
||||
f.onBlockAddedToDAGHandler = onBlockAddedToDAGHandler
|
||||
|
||||
@@ -50,7 +50,7 @@ func (flow *handleRequestHeadersFlow) start() error {
// GetHashesBetween is a relatively heavy operation so we limit it
// in order to avoid locking the consensus for too long
const maxBlueScoreDifference = 1 << 10
blockHashes, _, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
blockHashes, err := flow.Domain().Consensus().GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
if err != nil {
return err
}

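The comment above explains that GetHashesBetween is bounded by maxBlueScoreDifference so that a single call never holds the consensus lock for long. A rough, self-contained sketch of that bounded-window idea against a hypothetical integer-keyed stand-in (the real API takes *externalapi.DomainHash values, not ints):

```go
package main

import "fmt"

// hashProvider is a hypothetical stand-in for the consensus interface used
// above, reflecting the new signature from the diff (no actualHighHash return).
type hashProvider interface {
	GetHashesBetween(low, high int, maxBlueScoreDifference uint64) ([]int, error)
}

// fakeConsensus returns at most `max` "hashes" between low and high, so any
// single call stays cheap regardless of how far apart the endpoints are.
type fakeConsensus struct{}

func (fakeConsensus) GetHashesBetween(low, high int, max uint64) ([]int, error) {
	var out []int
	for h := low + 1; h <= high && uint64(len(out)) < max; h++ {
		out = append(out, h)
	}
	return out, nil
}

func main() {
	// Mirror of the flow above: a fixed window keeps each call bounded.
	const maxBlueScoreDifference = 1 << 10
	hashes, err := fakeConsensus{}.GetHashesBetween(0, 5000, maxBlueScoreDifference)
	fmt.Println(len(hashes), err) // 1024 <nil>
}
```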
@@ -13,7 +13,6 @@ import (

// SendPingsContext is the interface for the context needed for the SendPings flow.
type SendPingsContext interface {
ShutdownChan() <-chan struct{}
}

type sendPingsFlow struct {
@@ -40,13 +39,7 @@ func (flow *sendPingsFlow) start() error {
ticker := time.NewTicker(pingInterval)
defer ticker.Stop()

for {
select {
case <-flow.ShutdownChan():
return nil
case <-ticker.C:
}

for range ticker.C {
nonce, err := random.Uint64()
if err != nil {
return err
@@ -69,4 +62,5 @@ func (flow *sendPingsFlow) start() error {
}
flow.peer.SetPingIdle()
}
return nil
}

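The hunk above replaces a select over ShutdownChan and ticker.C with a plain `for range ticker.C` loop. For reference, here is a minimal, self-contained sketch of the removed pattern (a periodic loop driven by a ticker plus a shutdown channel that lets the loop exit promptly); the names are hypothetical and the new kaspad code instead relies on other teardown mechanisms.

```go
package main

import (
	"fmt"
	"time"
)

// pingLoop runs ping() on every tick and returns as soon as the shutdown
// channel is closed, mirroring the select-based pattern that was removed.
func pingLoop(interval time.Duration, shutdown <-chan struct{}, ping func() error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-shutdown:
			return nil
		case <-ticker.C:
		}
		if err := ping(); err != nil {
			return err
		}
	}
}

func main() {
	shutdown := make(chan struct{})
	go func() {
		time.Sleep(350 * time.Millisecond)
		close(shutdown)
	}()
	err := pingLoop(100*time.Millisecond, shutdown, func() error {
		fmt.Println("ping")
		return nil
	})
	fmt.Println("done:", err)
}
```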
@@ -129,7 +129,7 @@ type fakeRelayInvsContext struct {
|
||||
rwLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetBlockRelations(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, *externalapi.DomainHash, []*externalapi.DomainHash, error) {
|
||||
func (f *fakeRelayInvsContext) GetBlockChildren(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
@@ -181,7 +181,7 @@ func (f *fakeRelayInvsContext) GetBlockAcceptanceData(blockHash *externalapi.Dom
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
func (f *fakeRelayInvsContext) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64) (hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) {
|
||||
func (f *fakeRelayInvsContext) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64) ([]*externalapi.DomainHash, error) {
|
||||
panic(errors.Errorf("called unimplemented function from test '%s'", f.testName))
|
||||
}
|
||||
|
||||
|
||||
@@ -2,9 +2,6 @@ package protocol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain"
|
||||
|
||||
@@ -20,9 +17,7 @@ import (
|
||||
|
||||
// Manager manages the p2p protocol
|
||||
type Manager struct {
|
||||
context *flowcontext.FlowContext
|
||||
routersWaitGroup sync.WaitGroup
|
||||
isClosed uint32
|
||||
context *flowcontext.FlowContext
|
||||
}
|
||||
|
||||
// NewManager creates a new instance of the p2p protocol manager
|
||||
@@ -37,18 +32,6 @@ func NewManager(cfg *config.Config, domain domain.Domain, netAdapter *netadapter
|
||||
return &manager, nil
|
||||
}
|
||||
|
||||
// Close closes the protocol manager and waits until all p2p flows
|
||||
// finish.
|
||||
func (m *Manager) Close() {
|
||||
if !atomic.CompareAndSwapUint32(&m.isClosed, 0, 1) {
|
||||
panic(errors.New("The protocol manager was already closed"))
|
||||
}
|
||||
|
||||
atomic.StoreUint32(&m.isClosed, 1)
|
||||
m.context.Close()
|
||||
m.routersWaitGroup.Wait()
|
||||
}
|
||||
|
||||
// Peers returns the currently active peers
|
||||
func (m *Manager) Peers() []*peerpkg.Peer {
|
||||
return m.context.Peers()
|
||||
@@ -70,13 +53,11 @@ func (m *Manager) AddBlock(block *externalapi.DomainBlock) error {
|
||||
return m.context.AddBlock(block)
|
||||
}
|
||||
|
||||
func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error {
|
||||
flowsWaitGroup.Add(len(flows))
|
||||
func (m *Manager) runFlows(flows []*flow, peer *peerpkg.Peer, errChan <-chan error) error {
|
||||
for _, flow := range flows {
|
||||
executeFunc := flow.executeFunc // extract to new variable so that it's not overwritten
|
||||
spawn(fmt.Sprintf("flow-%s", flow.name), func() {
|
||||
executeFunc(peer)
|
||||
flowsWaitGroup.Done()
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/rejects"
|
||||
"github.com/kaspanet/kaspad/infrastructure/network/connmanager"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/app/protocol/flows/addressexchange"
|
||||
@@ -41,13 +41,6 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
// After flows were registered - spawn a new thread that will wait for connection to finish initializing
|
||||
// and start receiving messages
|
||||
spawn("routerInitializer-runFlows", func() {
|
||||
m.routersWaitGroup.Add(1)
|
||||
defer m.routersWaitGroup.Done()
|
||||
|
||||
if atomic.LoadUint32(&m.isClosed) == 1 {
|
||||
panic(errors.Errorf("tried to initialize router when the protocol manager is closed"))
|
||||
}
|
||||
|
||||
isBanned, err := m.context.ConnectionManager().IsBanned(netConnection)
|
||||
if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) {
|
||||
panic(err)
|
||||
@@ -86,17 +79,11 @@ func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *net
|
||||
|
||||
removeHandshakeRoutes(router)
|
||||
|
||||
flowsWaitGroup := &sync.WaitGroup{}
|
||||
err = m.runFlows(flows, peer, errChan, flowsWaitGroup)
|
||||
err = m.runFlows(flows, peer, errChan)
|
||||
if err != nil {
|
||||
m.handleError(err, netConnection, router.OutgoingRoute())
|
||||
// We call `flowsWaitGroup.Wait()` in two places instead of deferring, because
|
||||
// we already defer `m.routersWaitGroup.Done()`, so we try to avoid error prone
|
||||
// and confusing use of multiple dependent defers.
|
||||
flowsWaitGroup.Wait()
|
||||
return
|
||||
}
|
||||
flowsWaitGroup.Wait()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -251,7 +238,7 @@ func (m *Manager) registerTransactionRelayFlow(router *routerpkg.Router, isStopp
|
||||
outgoingRoute := router.OutgoingRoute()
|
||||
|
||||
return []*flow{
|
||||
m.registerFlowWithCapacity("HandleRelayedTransactions", 10_000, router,
|
||||
m.registerFlow("HandleRelayedTransactions", router,
|
||||
[]appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan,
|
||||
func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error {
|
||||
return transactionrelay.HandleRelayedTransactions(m.context, incomingRoute, outgoingRoute)
|
||||
@@ -287,24 +274,6 @@ func (m *Manager) registerFlow(name string, router *routerpkg.Router, messageTyp
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
|
||||
}
|
||||
|
||||
func (m *Manager) registerFlowWithCapacity(name string, capacity int, router *routerpkg.Router,
|
||||
messageTypes []appmessage.MessageCommand, isStopping *uint32,
|
||||
errChan chan error, initializeFunc flowInitializeFunc) *flow {
|
||||
|
||||
route, err := router.AddIncomingRouteWithCapacity(capacity, messageTypes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc)
|
||||
}
|
||||
|
||||
func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isStopping *uint32,
|
||||
errChan chan error, initializeFunc flowInitializeFunc) *flow {
|
||||
|
||||
return &flow{
|
||||
name: name,
|
||||
executeFunc: func(peer *peerpkg.Peer) {
|
||||
|
||||
@@ -69,12 +69,12 @@ func (m *Manager) NotifyBlockAddedToDAG(block *externalapi.DomainBlock, blockIns
|
||||
return err
|
||||
}
|
||||
|
||||
rpcBlock := appmessage.DomainBlockToRPCBlock(block)
|
||||
err = m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, false)
|
||||
msgBlock := appmessage.DomainBlockToMsgBlock(block)
|
||||
blockVerboseData, err := m.context.BuildBlockVerboseData(block.Header, block, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock)
|
||||
blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(msgBlock, blockVerboseData)
|
||||
return m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification)
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pair
|
||||
UTXOEntry: &appmessage.RPCUTXOEntry{
|
||||
Amount: utxoEntry.Amount(),
|
||||
ScriptPublicKey: &appmessage.RPCScriptPublicKey{Script: hex.EncodeToString(utxoEntry.ScriptPublicKey().Script), Version: utxoEntry.ScriptPublicKey().Version},
|
||||
BlockDAAScore: utxoEntry.BlockDAAScore(),
|
||||
BlockBlueScore: utxoEntry.BlockBlueScore(),
|
||||
IsCoinbase: utxoEntry.IsCoinbase(),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -2,16 +2,23 @@ package rpccontext
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
difficultyPackage "github.com/kaspanet/kaspad/util/difficulty"
|
||||
"github.com/pkg/errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
|
||||
"github.com/kaspanet/kaspad/infrastructure/logger"
|
||||
"github.com/kaspanet/kaspad/util/difficulty"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/hashes"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/estimatedsize"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/txscript"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
|
||||
|
||||
"github.com/kaspanet/kaspad/app/appmessage"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
|
||||
@@ -21,6 +28,79 @@ import (
|
||||
// ErrBuildBlockVerboseDataInvalidBlock indicates that a block that was given to BuildBlockVerboseData is invalid.
|
||||
var ErrBuildBlockVerboseDataInvalidBlock = errors.New("ErrBuildBlockVerboseDataInvalidBlock")
|
||||
|
||||
// BuildBlockVerboseData builds a BlockVerboseData from the given blockHeader.
|
||||
// A block may optionally also be given if it's available in the calling context.
|
||||
func (ctx *Context) BuildBlockVerboseData(blockHeader externalapi.BlockHeader, block *externalapi.DomainBlock,
|
||||
includeTransactionVerboseData bool) (*appmessage.BlockVerboseData, error) {
|
||||
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildBlockVerboseData")
|
||||
defer onEnd()
|
||||
|
||||
hash := consensushashing.HeaderHash(blockHeader)
|
||||
|
||||
blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus == externalapi.StatusInvalid {
|
||||
return nil, errors.Wrap(ErrBuildBlockVerboseDataInvalidBlock, "cannot build verbose data for "+
|
||||
"invalid block")
|
||||
}
|
||||
|
||||
childrenHashes, err := ctx.Domain.Consensus().GetBlockChildren(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &appmessage.BlockVerboseData{
|
||||
Hash: hash.String(),
|
||||
Version: blockHeader.Version(),
|
||||
VersionHex: fmt.Sprintf("%08x", blockHeader.Version()),
|
||||
HashMerkleRoot: blockHeader.HashMerkleRoot().String(),
|
||||
AcceptedIDMerkleRoot: blockHeader.AcceptedIDMerkleRoot().String(),
|
||||
UTXOCommitment: blockHeader.UTXOCommitment().String(),
|
||||
ParentHashes: hashes.ToStrings(blockHeader.ParentHashes()),
|
||||
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
||||
Nonce: blockHeader.Nonce(),
|
||||
Time: blockHeader.TimeInMilliseconds(),
|
||||
Bits: strconv.FormatInt(int64(blockHeader.Bits()), 16),
|
||||
Difficulty: ctx.GetDifficultyRatio(blockHeader.Bits(), ctx.Config.ActiveNetParams),
|
||||
BlueScore: blockInfo.BlueScore,
|
||||
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus != externalapi.StatusHeaderOnly {
|
||||
if block == nil {
|
||||
block, err = ctx.Domain.Consensus().GetBlock(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
txIDs := make([]string, len(block.Transactions))
|
||||
for i, tx := range block.Transactions {
|
||||
txIDs[i] = consensushashing.TransactionID(tx).String()
|
||||
}
|
||||
result.TxIDs = txIDs
|
||||
|
||||
if includeTransactionVerboseData {
|
||||
transactionVerboseData := make([]*appmessage.TransactionVerboseData, len(block.Transactions))
|
||||
for i, tx := range block.Transactions {
|
||||
txID := consensushashing.TransactionID(tx).String()
|
||||
data, err := ctx.BuildTransactionVerboseData(tx, txID, blockHeader, hash.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactionVerboseData[i] = data
|
||||
}
|
||||
result.TransactionVerboseData = transactionVerboseData
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetDifficultyRatio returns the proof-of-work difficulty as a multiple of the
|
||||
// minimum difficulty using the passed bits field from the header of a block.
|
||||
func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) float64 {
|
||||
@@ -28,7 +108,7 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
|
||||
// converted back to a number. Note this is not the same as the proof of
|
||||
// work limit directly because the block difficulty is encoded in a block
|
||||
// with the compact form which loses precision.
|
||||
target := difficultyPackage.CompactToBig(bits)
|
||||
target := difficulty.CompactToBig(bits)
|
||||
|
||||
difficulty := new(big.Rat).SetFrac(params.PowMax, target)
|
||||
diff, _ := difficulty.Float64()
|
||||
@@ -39,125 +119,106 @@ func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) fl
|
||||
return diff
|
||||
}
|
||||
|
||||
// PopulateBlockWithVerboseData populates the given `block` with verbose
|
||||
// data from `domainBlockHeader` and optionally from `domainBlock`
|
||||
func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, domainBlockHeader externalapi.BlockHeader,
|
||||
domainBlock *externalapi.DomainBlock, includeTransactionVerboseData bool) error {
|
||||
// BuildTransactionVerboseData builds a TransactionVerboseData from
|
||||
// the given parameters
|
||||
func (ctx *Context) BuildTransactionVerboseData(tx *externalapi.DomainTransaction, txID string,
|
||||
blockHeader externalapi.BlockHeader, blockHash string) (
|
||||
*appmessage.TransactionVerboseData, error) {
|
||||
|
||||
blockHash := consensushashing.HeaderHash(domainBlockHeader)
|
||||
onEnd := logger.LogAndMeasureExecutionTime(log, "BuildTransactionVerboseData")
|
||||
defer onEnd()
|
||||
|
||||
blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
var payloadHash string
|
||||
if tx.SubnetworkID != subnetworks.SubnetworkIDNative {
|
||||
payloadHash = tx.PayloadHash.String()
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus == externalapi.StatusInvalid {
|
||||
return errors.Wrap(ErrBuildBlockVerboseDataInvalidBlock, "cannot build verbose data for "+
|
||||
"invalid block")
|
||||
txReply := &appmessage.TransactionVerboseData{
|
||||
TxID: txID,
|
||||
Hash: consensushashing.TransactionHash(tx).String(),
|
||||
Size: estimatedsize.TransactionEstimatedSerializedSize(tx),
|
||||
TransactionVerboseInputs: ctx.buildTransactionVerboseInputs(tx),
|
||||
TransactionVerboseOutputs: ctx.buildTransactionVerboseOutputs(tx, nil),
|
||||
Version: tx.Version,
|
||||
LockTime: tx.LockTime,
|
||||
SubnetworkID: tx.SubnetworkID.String(),
|
||||
Gas: tx.Gas,
|
||||
PayloadHash: payloadHash,
|
||||
Payload: hex.EncodeToString(tx.Payload),
|
||||
}
|
||||
|
||||
_, selectedParentHash, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
if blockHeader != nil {
|
||||
txReply.Time = uint64(blockHeader.TimeInMilliseconds())
|
||||
txReply.BlockTime = uint64(blockHeader.TimeInMilliseconds())
|
||||
txReply.BlockHash = blockHash
|
||||
}
|
||||
|
||||
block.VerboseData = &appmessage.RPCBlockVerboseData{
|
||||
Hash: blockHash.String(),
|
||||
Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams),
|
||||
ChildrenHashes: hashes.ToStrings(childrenHashes),
|
||||
SelectedParentHash: selectedParentHash.String(),
|
||||
IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly,
|
||||
BlueScore: blockInfo.BlueScore,
|
||||
}
|
||||
return txReply, nil
|
||||
}
|
||||
|
||||
if blockInfo.BlockStatus == externalapi.StatusHeaderOnly {
|
||||
return nil
|
||||
}
|
||||
func (ctx *Context) buildTransactionVerboseInputs(tx *externalapi.DomainTransaction) []*appmessage.TransactionVerboseInput {
|
||||
inputs := make([]*appmessage.TransactionVerboseInput, len(tx.Inputs))
|
||||
for i, transactionInput := range tx.Inputs {
|
||||
// The disassembled string will contain [error] inline
|
||||
// if the script doesn't fully parse, so ignore the
|
||||
// error here.
|
||||
disbuf, _ := txscript.DisasmString(constants.MaxScriptPublicKeyVersion, transactionInput.SignatureScript)
|
||||
|
||||
// Get the block if we didn't receive it previously
|
||||
if domainBlock == nil {
|
||||
domainBlock, err = ctx.Domain.Consensus().GetBlock(blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
input := &appmessage.TransactionVerboseInput{}
|
||||
input.TxID = transactionInput.PreviousOutpoint.TransactionID.String()
|
||||
input.OutputIndex = transactionInput.PreviousOutpoint.Index
|
||||
input.Sequence = transactionInput.Sequence
|
||||
input.ScriptSig = &appmessage.ScriptSig{
|
||||
Asm: disbuf,
|
||||
Hex: hex.EncodeToString(transactionInput.SignatureScript),
|
||||
}
|
||||
inputs[i] = input
|
||||
}
|
||||
|
||||
transactionIDs := make([]string, len(domainBlock.Transactions))
|
||||
for i, transaction := range domainBlock.Transactions {
|
||||
transactionIDs[i] = consensushashing.TransactionID(transaction).String()
|
||||
}
|
||||
block.VerboseData.TransactionIDs = transactionIDs
|
||||
return inputs
|
||||
}
|
||||
|
||||
if includeTransactionVerboseData {
|
||||
for _, transaction := range block.Transactions {
|
||||
err := ctx.PopulateTransactionWithVerboseData(transaction, domainBlockHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
// buildTransactionVerboseOutputs returns a slice of JSON objects for the outputs of the passed
|
||||
// transaction.
|
||||
func (ctx *Context) buildTransactionVerboseOutputs(tx *externalapi.DomainTransaction, filterAddrMap map[string]struct{}) []*appmessage.TransactionVerboseOutput {
|
||||
outputs := make([]*appmessage.TransactionVerboseOutput, len(tx.Outputs))
|
||||
for i, transactionOutput := range tx.Outputs {
|
||||
|
||||
// Ignore the error here since an error means the script
|
||||
// couldn't parse and there is no additional information about
|
||||
// it anyways.
|
||||
scriptClass, addr, _ := txscript.ExtractScriptPubKeyAddress(
|
||||
transactionOutput.ScriptPublicKey, ctx.Config.ActiveNetParams)
|
||||
|
||||
// Encode the addresses while checking if the address passes the
|
||||
// filter when needed.
|
||||
passesFilter := len(filterAddrMap) == 0
|
||||
var encodedAddr string
|
||||
if addr != nil {
|
||||
encodedAddr = addr.EncodeAddress()
|
||||
|
||||
// If the filter doesn't already pass, make it pass if
|
||||
// the address exists in the filter.
|
||||
if _, exists := filterAddrMap[encodedAddr]; exists {
|
||||
passesFilter = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PopulateTransactionWithVerboseData populates the given `transaction` with
|
||||
// verbose data from `domainTransaction`
|
||||
func (ctx *Context) PopulateTransactionWithVerboseData(
|
||||
transaction *appmessage.RPCTransaction, domainBlockHeader externalapi.BlockHeader) error {
|
||||
|
||||
domainTransaction, err := appmessage.RPCTransactionToDomainTransaction(transaction)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
transaction.VerboseData = &appmessage.RPCTransactionVerboseData{
|
||||
TransactionID: consensushashing.TransactionID(domainTransaction).String(),
|
||||
Hash: consensushashing.TransactionHash(domainTransaction).String(),
|
||||
Size: estimatedsize.TransactionEstimatedSerializedSize(domainTransaction),
|
||||
}
|
||||
if domainBlockHeader != nil {
|
||||
transaction.VerboseData.BlockHash = consensushashing.HeaderHash(domainBlockHeader).String()
|
||||
transaction.VerboseData.BlockTime = uint64(domainBlockHeader.TimeInMilliseconds())
|
||||
}
|
||||
for _, input := range transaction.Inputs {
|
||||
ctx.populateTransactionInputWithVerboseData(input)
|
||||
}
|
||||
for _, output := range transaction.Outputs {
|
||||
err := ctx.populateTransactionOutputWithVerboseData(output)
|
||||
if err != nil {
|
||||
return err
|
||||
if !passesFilter {
|
||||
continue
|
||||
}
|
||||
|
||||
output := &appmessage.TransactionVerboseOutput{}
|
||||
output.Index = uint32(i)
|
||||
output.Value = transactionOutput.Value
|
||||
output.ScriptPubKey = &appmessage.ScriptPubKeyResult{
|
||||
Version: transactionOutput.ScriptPublicKey.Version,
|
||||
Address: encodedAddr,
|
||||
Hex: hex.EncodeToString(transactionOutput.ScriptPublicKey.Script),
|
||||
Type: scriptClass.String(),
|
||||
}
|
||||
outputs[i] = output
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *Context) populateTransactionInputWithVerboseData(transactionInput *appmessage.RPCTransactionInput) {
|
||||
transactionInput.VerboseData = &appmessage.RPCTransactionInputVerboseData{}
|
||||
}
|
||||
|
||||
func (ctx *Context) populateTransactionOutputWithVerboseData(transactionOutput *appmessage.RPCTransactionOutput) error {
|
||||
scriptPublicKey, err := hex.DecodeString(transactionOutput.ScriptPublicKey.Script)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domainScriptPublicKey := &externalapi.ScriptPublicKey{
|
||||
Script: scriptPublicKey,
|
||||
Version: transactionOutput.ScriptPublicKey.Version,
|
||||
}
|
||||
|
||||
// Ignore the error here since an error means the script
|
||||
// couldn't be parsed and there's no additional information about
|
||||
// it anyways
|
||||
scriptPublicKeyType, scriptPublicKeyAddress, _ := txscript.ExtractScriptPubKeyAddress(
|
||||
domainScriptPublicKey, ctx.Config.ActiveNetParams)
|
||||
|
||||
var encodedScriptPublicKeyAddress string
|
||||
if scriptPublicKeyAddress != nil {
|
||||
encodedScriptPublicKeyAddress = scriptPublicKeyAddress.EncodeAddress()
|
||||
}
|
||||
transactionOutput.VerboseData = &appmessage.RPCTransactionOutputVerboseData{
|
||||
ScriptPublicKeyType: scriptPublicKeyType.String(),
|
||||
ScriptPublicKeyAddress: encodedScriptPublicKeyAddress,
|
||||
}
|
||||
return nil
|
||||
|
||||
return outputs
|
||||
}
|
||||
|
||||
@@ -26,12 +26,10 @@ func HandleGetBlock(context *rpccontext.Context, _ *router.Router, request appme
|
||||
errorMessage.Error = appmessage.RPCErrorf("Block %s not found", hash)
|
||||
return errorMessage, nil
|
||||
}
|
||||
block := &externalapi.DomainBlock{Header: header}
|
||||
|
||||
response := appmessage.NewGetBlockResponseMessage()
|
||||
response.Block = appmessage.DomainBlockToRPCBlock(block)
|
||||
|
||||
err = context.PopulateBlockWithVerboseData(response.Block, header, nil, getBlockRequest.IncludeTransactionVerboseData)
|
||||
blockVerboseData, err := context.BuildBlockVerboseData(header, nil, getBlockRequest.IncludeTransactionVerboseData)
|
||||
if err != nil {
|
||||
if errors.Is(err, rpccontext.ErrBuildBlockVerboseDataInvalidBlock) {
|
||||
errorMessage := &appmessage.GetBlockResponseMessage{}
|
||||
@@ -41,5 +39,7 @@ func HandleGetBlock(context *rpccontext.Context, _ *router.Router, request appme
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response.BlockVerboseData = blockVerboseData
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
@@ -31,12 +31,12 @@ func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, reque
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock)
|
||||
msgBlock := appmessage.DomainBlockToMsgBlock(templateBlock)
|
||||
|
||||
isSynced, err := context.ProtocolManager.ShouldMine()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, isSynced), nil
|
||||
return appmessage.NewGetBlockTemplateResponseMessage(msgBlock, isSynced), nil
|
||||
}
|
||||
|
||||
@@ -18,8 +18,8 @@ const (
func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
getBlocksRequest := request.(*appmessage.GetBlocksRequestMessage)

// Validate that user didn't set IncludeTransactionVerboseData without setting IncludeBlocks
if !getBlocksRequest.IncludeBlocks && getBlocksRequest.IncludeTransactionVerboseData {
// Validate that user didn't set IncludeTransactionVerboseData without setting IncludeBlockVerboseData
if !getBlocksRequest.IncludeBlockVerboseData && getBlocksRequest.IncludeTransactionVerboseData {
return &appmessage.GetBlocksResponseMessage{
Error: appmessage.RPCErrorf(
"If includeTransactionVerboseData is set, then includeBlockVerboseData must be set as well"),
@@ -55,7 +55,8 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
if err != nil {
return nil, err
}
blockHashes, highHash, err := context.Domain.Consensus().GetHashesBetween(lowHash, virtualSelectedParent, maxBlocksInGetBlocksResponse)
blockHashes, err := context.Domain.Consensus().GetHashesBetween(
lowHash, virtualSelectedParent, maxBlocksInGetBlocksResponse)
if err != nil {
return nil, err
}
@@ -63,10 +64,9 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
// prepend low hash to make it inclusive
blockHashes = append([]*externalapi.DomainHash{lowHash}, blockHashes...)

// If the high hash is equal to virtualSelectedParent it means GetHashesBetween didn't skip any hashes, and
// there's space to add the virtualSelectedParent's anticone, otherwise you can't add the anticone because
// there's no guarantee that all of the anticone root ancestors will be present.
if highHash.Equal(virtualSelectedParent) {
// If there are no maxBlocksInGetBlocksResponse between lowHash and virtualSelectedParent -
// add virtualSelectedParent's anticone
if len(blockHashes) < maxBlocksInGetBlocksResponse {
virtualSelectedParentAnticone, err := context.Domain.Consensus().Anticone(virtualSelectedParent)
if err != nil {
return nil, err
@@ -81,23 +81,26 @@ func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appm
}

// Prepare the response
response := appmessage.NewGetBlocksResponseMessage()
response.BlockHashes = hashes.ToStrings(blockHashes)
if getBlocksRequest.IncludeBlocks {
blocks := make([]*appmessage.RPCBlock, len(blockHashes))
response := &appmessage.GetBlocksResponseMessage{
BlockHashes: hashes.ToStrings(blockHashes),
}

// Retrieve all block data in case BlockVerboseData was requested
if getBlocksRequest.IncludeBlockVerboseData {
response.BlockVerboseData = make([]*appmessage.BlockVerboseData, len(blockHashes))
for i, blockHash := range blockHashes {
blockHeader, err := context.Domain.Consensus().GetBlockHeader(blockHash)
if err != nil {
return nil, err
}
block := &externalapi.DomainBlock{Header: blockHeader}
blocks[i] = appmessage.DomainBlockToRPCBlock(block)
err = context.PopulateBlockWithVerboseData(blocks[i], blockHeader, nil, getBlocksRequest.IncludeTransactionVerboseData)
blockVerboseData, err := context.BuildBlockVerboseData(blockHeader, nil,
getBlocksRequest.IncludeTransactionVerboseData)
if err != nil {
return nil, err
}

response.BlockVerboseData[i] = blockVerboseData
}
}

return response, nil
}

@@ -5,8 +5,6 @@ import (
"sort"
"testing"

"github.com/kaspanet/kaspad/domain/consensus/model"

"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/app/rpc/rpchandlers"
@@ -29,8 +27,6 @@ func (d fakeDomain) MiningManager() miningmanager.MiningManager { return nil }

func TestHandleGetBlocks(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
stagingArea := model.NewStagingArea()

factory := consensus.NewFactory()
tc, teardown, err := factory.NewTestConsensus(params, false, "TestHandleGetBlocks")
if err != nil {
@@ -59,7 +55,7 @@ func TestHandleGetBlocks(t *testing.T) {
antipast := make([]*externalapi.DomainHash, 0, len(slice))

for _, blockHash := range slice {
isInPastOfPovBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, povBlock)
isInPastOfPovBlock, err := tc.DAGTopologyManager().IsAncestorOf(blockHash, povBlock)
if err != nil {
t.Fatalf("Failed doing reachability check: '%v'", err)
}
@@ -91,7 +87,7 @@ func TestHandleGetBlocks(t *testing.T) {
}
splitBlocks = append(splitBlocks, blockHash)
}
sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(stagingArea, splitBlocks, tc, t)))
sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(splitBlocks, tc, t)))
restOfSplitBlocks, selectedParent := splitBlocks[:len(splitBlocks)-1], splitBlocks[len(splitBlocks)-1]
expectedOrder = append(expectedOrder, selectedParent)
expectedOrder = append(expectedOrder, restOfSplitBlocks...)

@@ -8,10 +8,6 @@ import (

// HandleGetInfo handles the respectively named RPC command
func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
response := appmessage.NewGetInfoResponseMessage(
context.NetAdapter.ID().String(),
uint64(context.Domain.MiningManager().TransactionCount()),
)

response := appmessage.NewGetInfoResponseMessage(context.NetAdapter.ID().String())
return response, nil
}

@@ -3,22 +3,25 @@ package rpchandlers
import (
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/app/rpc/rpccontext"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/infrastructure/network/netadapter/router"
)

// HandleGetMempoolEntries handles the respectively named RPC command
func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {

transactions := context.Domain.MiningManager().AllTransactions()
entries := make([]*appmessage.MempoolEntry, 0, len(transactions))
for _, transaction := range transactions {
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil)
for _, tx := range transactions {
transactionVerboseData, err := context.BuildTransactionVerboseData(
tx, consensushashing.TransactionID(tx).String(), nil, "")
if err != nil {
return nil, err
}

entries = append(entries, &appmessage.MempoolEntry{
Fee: transaction.Fee,
Transaction: rpcTransaction,
Fee: tx.Fee,
TransactionVerboseData: transactionVerboseData,
})
}

@@ -24,11 +24,12 @@ func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, reques
errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID)
return errorMessage, nil
}
rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction)
err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil)

transactionVerboseData, err := context.BuildTransactionVerboseData(
transaction, getMempoolEntryRequest.TxID, nil, "")
if err != nil {
return nil, err
}

return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction), nil
return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, transactionVerboseData), nil
}

@@ -14,6 +14,9 @@ import (
func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage)

msgBlock := submitBlockRequest.Block
domainBlock := appmessage.MsgBlockToDomainBlock(msgBlock)

if context.ProtocolManager.IsIBDRunning() {
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Block not submitted - IBD is running"),
@@ -21,15 +24,7 @@ func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request ap
}, nil
}

domainBlock, err := appmessage.RPCBlockToDomainBlock(submitBlockRequest.Block)
if err != nil {
return &appmessage.SubmitBlockResponseMessage{
Error: appmessage.RPCErrorf("Could not parse block: %s", err),
RejectReason: appmessage.RejectReasonBlockInvalid,
}, nil
}

err = context.ProtocolManager.AddBlock(domainBlock)
err := context.ProtocolManager.AddBlock(domainBlock)
if err != nil {
isProtocolOrRuleError := errors.As(err, &ruleerrors.RuleError{}) || errors.As(err, &protocolerrors.ProtocolError{})
if !isProtocolOrRuleError {

@@ -16,7 +16,7 @@ func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessa
errorMessage.Error = appmessage.RPCErrorf("Could not parse IP %s", unbanRequest.IP)
return errorMessage, nil
}
err := context.AddressManager.Unban(appmessage.NewNetAddressIPPort(ip, 0))
err := context.AddressManager.Unban(appmessage.NewNetAddressIPPort(ip, 0, 0))
if err != nil {
errorMessage := &appmessage.UnbanResponseMessage{}
errorMessage.Error = appmessage.RPCErrorf("Could not unban IP: %s", err)

@@ -1,11 +1,31 @@

Kaspad v0.9.0 - 2021-03-04
===========================

* Merge big subdags in pick virtual parents (#1574)
* Write in the reject message the tx rejection reason (#1573)
* Add nil checks for protowire (#1570)
* Increase getBlocks limit to 1000 (#1572)
* Return RPC error if getBlock's lowHash doesn't exist (#1569)
* Add default dns-seeder to testnet (#1568)
* Fix utxoindex deserialization (#1566)
* Add pruning point hash to GetBlockDagInfo response (#1565)
* Use EmitUnpopulated so that kaspactl prints all fields, even the default ones (#1561)
* Stop logging an error whenever an RPC/P2P connection is canceled (#1562)
* Cleanup the logger and make it asynchronous (#1524)
* Close all iterators (#1542)
* Add childrenHashes to GetBlock/s RPC commands (#1560)
* Add ScriptPublicKey.Version to RPC (#1559)
* Fix the target block rate to create less bursty mining (#1554)

Kaspad v0.8.10 - 2021-02-25
===========================

[*] Fix bug where invalid mempool transactions were not removed (#1551)
[*] Add RPC reconnection to the miner (#1552)
[*] Remove virtual diff parents - only selectedTip is virtualDiffParent now (#1550)
[*] Fix UTXO index (#1548)
[*] Prevent fast failing (#1545)
[*] Increase the sleep time in kaspaminer when the node is not synced (#1544)
[*] Disallow header only blocks on RPC, relay and when requesting IBD full blocks (#1537)
[*] Make templateManager hold a DomainBlock and isSynced bool instead of a GetBlockTemplateResponseMessage (#1538)
* Fix bug where invalid mempool transactions were not removed (#1551)
* Add RPC reconnection to the miner (#1552)
* Remove virtual diff parents - only selectedTip is virtualDiffParent now (#1550)
* Fix UTXO index (#1548)
* Prevent fast failing (#1545)
* Increase the sleep time in kaspaminer when the node is not synced (#1544)
* Disallow header only blocks on RPC, relay and when requesting IBD full blocks (#1537)
* Make templateManager hold a DomainBlock and isSynced bool instead of a GetBlockTemplateResponseMessage (#1538)

@@ -24,9 +24,9 @@ const (

var (
// Default configuration options
defaultAppDir = util.AppDir("kaspaminer", false)
defaultLogFile = filepath.Join(defaultAppDir, defaultLogFilename)
defaultErrLogFile = filepath.Join(defaultAppDir, defaultErrLogFilename)
defaultHomeDir = util.AppDataDir("kaspaminer", false)
defaultLogFile = filepath.Join(defaultHomeDir, defaultLogFilename)
defaultErrLogFile = filepath.Join(defaultHomeDir, defaultErrLogFilename)
defaultRPCServer = "localhost"
)

@@ -44,32 +44,34 @@ func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond

spawn("blocksLoop", func() {
const windowSize = 10
var expectedDurationForWindow time.Duration
var windowExpectedEndTime time.Time
hasBlockRateTarget := targetBlocksPerSecond != 0
var windowTicker, blockTicker *time.Ticker
// We use tickers to limit the block rate:
// 1. windowTicker -> makes sure that the last windowSize blocks take at least windowSize*targetBlocksPerSecond.
// 2. blockTicker -> makes sure that each block takes at least targetBlocksPerSecond/windowSize.
// that way we both allow for fluctuation in block rate but also make sure they're not too big (by an order of magnitude)
if hasBlockRateTarget {
windowRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond / windowSize))
blockRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond * windowSize))
log.Infof("Minimum average time per %d blocks: %s, smaller minimum time per block: %s", windowSize, windowRate, blockRate)
windowTicker = time.NewTicker(windowRate)
blockTicker = time.NewTicker(blockRate)
defer windowTicker.Stop()
defer blockTicker.Stop()
expectedDurationForWindow = time.Duration(float64(windowSize)/targetBlocksPerSecond) * time.Second
windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
}
windowStart := time.Now()
for blockIndex := 1; ; blockIndex++ {
blockInWindowIndex := 0

sleepTime := 0 * time.Second

for {
foundBlockChan <- mineNextBlock(mineWhenNotSynced)

if hasBlockRateTarget {
<-blockTicker.C
if (blockIndex % windowSize) == 0 {
tickerStart := time.Now()
<-windowTicker.C
log.Infof("Finished mining %d blocks in: %s. slept for: %s", windowSize, time.Since(windowStart), time.Since(tickerStart))
windowStart = time.Now()
blockInWindowIndex++
if blockInWindowIndex == windowSize-1 {
deviation := windowExpectedEndTime.Sub(time.Now())
if deviation > 0 {
sleepTime = deviation / windowSize
log.Infof("Finished to mine %d blocks %s earlier than expected. Setting the miner "+
"to sleep %s between blocks to compensate",
windowSize, deviation, sleepTime)
}
blockInWindowIndex = 0
windowExpectedEndTime = time.Now().Add(expectedDurationForWindow)
}
time.Sleep(sleepTime)
}
}
})

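The two-ticker pacing that the new mineLoop comments describe can be illustrated with a minimal, self-contained sketch. This uses only the standard library; the target rate, block count, and the "mine a block" placeholder are hypothetical stand-ins, not kaspaminer code:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const windowSize = 10
	const targetBlocksPerSecond = 2.0 // hypothetical target rate

	// blockTicker enforces a small minimum gap before each block;
	// windowTicker enforces the average rate over a whole window of blocks.
	blockTicker := time.NewTicker(time.Duration(float64(time.Second) / (targetBlocksPerSecond * windowSize)))
	windowTicker := time.NewTicker(time.Duration(float64(time.Second) / (targetBlocksPerSecond / windowSize)))
	defer blockTicker.Stop()
	defer windowTicker.Stop()

	for blockIndex := 1; blockIndex <= 30; blockIndex++ {
		// ... mine a block here ...
		<-blockTicker.C // short per-block minimum
		if blockIndex%windowSize == 0 {
			<-windowTicker.C // longer per-window minimum
			fmt.Printf("window of %d blocks finished at %s\n", windowSize, time.Now().Format(time.StampMilli))
		}
	}
}
```

The short ticker allows individual blocks to come quickly, while the long ticker caps the sustained rate, which is the fluctuation-with-a-bound behaviour the comment aims for.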
@@ -198,11 +200,7 @@ func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan er
errChan <- errors.Wrapf(err, "Error getting block template from %s", client.safeRPCClient().Address())
return
}
err = templatemanager.Set(template)
if err != nil {
errChan <- errors.Wrapf(err, "Error setting block template from %s", client.safeRPCClient().Address())
return
}
templatemanager.Set(template)
}

getBlockTemplate()

@@ -23,14 +23,10 @@ func Get() (*externalapi.DomainBlock, bool) {
}

// Set sets the current template to work on
func Set(template *appmessage.GetBlockTemplateResponseMessage) error {
block, err := appmessage.RPCBlockToDomainBlock(template.Block)
if err != nil {
return err
}
func Set(template *appmessage.GetBlockTemplateResponseMessage) {
block := appmessage.MsgBlockToDomainBlock(template.MsgBlock)
lock.Lock()
defer lock.Unlock()
currentTemplate = block
isSynced = template.IsSynced
return nil
}

@@ -10,8 +10,7 @@ func isUTXOSpendable(entry *appmessage.UTXOsByAddressesEntry, virtualSelectedPar
if !entry.UTXOEntry.IsCoinbase {
return true
}
blockBlueScore := entry.UTXOEntry.BlockDAAScore
// TODO: Check for a better alternative than virtualSelectedParentBlueScore
blockBlueScore := entry.UTXOEntry.BlockBlueScore
return blockBlueScore+coinbaseMaturity < virtualSelectedParentBlueScore
}

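The maturity rule in isUTXOSpendable reduces to a single comparison between the score at which the coinbase output was created and the current virtual selected parent's score. A standalone sketch with made-up numbers (the maturity constant below is hypothetical, not kaspad's actual value):

```go
package main

import "fmt"

// isCoinbaseSpendable mirrors the shape of the check above: a coinbase output
// becomes spendable only once enough blocks have been added on top of it.
func isCoinbaseSpendable(utxoScore, coinbaseMaturity, virtualSelectedParentScore uint64) bool {
	return utxoScore+coinbaseMaturity < virtualSelectedParentScore
}

func main() {
	const coinbaseMaturity = 100 // hypothetical maturity depth
	fmt.Println(isCoinbaseSpendable(1000, coinbaseMaturity, 1050)) // false - too recent
	fmt.Println(isCoinbaseSpendable(1000, coinbaseMaturity, 1101)) // true - matured
}
```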
@@ -9,7 +9,7 @@ import (
)

func create(conf *createConfig) error {
privateKey, err := secp256k1.GenerateSchnorrKeyPair()
privateKey, err := secp256k1.GeneratePrivateKey()
if err != nil {
return errors.Wrap(err, "Failed to generate private key")
}

@@ -3,11 +3,9 @@ package main
import (
"encoding/hex"
"fmt"

"github.com/kaspanet/go-secp256k1"
"github.com/kaspanet/kaspad/app/appmessage"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/constants"
"github.com/kaspanet/kaspad/domain/consensus/utils/subnetworks"
"github.com/kaspanet/kaspad/domain/consensus/utils/transactionid"
@@ -77,7 +75,7 @@ func parsePrivateKey(privateKeyHex string) (*secp256k1.SchnorrKeyPair, *secp256k
if err != nil {
return nil, nil, errors.Wrap(err, "Error parsing private key hex")
}
keyPair, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes)
keyPair, err := secp256k1.DeserializePrivateKeyFromSlice(privateKeyBytes)
if err != nil {
return nil, nil, errors.Wrap(err, "Error deserializing private key")
}
@@ -179,12 +177,11 @@ func generateTransaction(keyPair *secp256k1.SchnorrKeyPair, selectedUTXOs []*app
SubnetworkID: subnetworks.SubnetworkIDNative,
Gas: 0,
Payload: nil,
PayloadHash: externalapi.DomainHash{},
}
sighashReusedValues := &consensushashing.SighashReusedValues{}

for i, input := range domainTransaction.Inputs {
signatureScript, err := txscript.SignatureScript(
domainTransaction, i, consensushashing.SigHashAll, keyPair, sighashReusedValues)
signatureScript, err := txscript.SignatureScript(domainTransaction, i, fromScript, txscript.SigHashAll, keyPair)
if err != nil {
return nil, err
}

@@ -46,7 +46,6 @@ type consensus struct {
utxoDiffStore model.UTXODiffStore
finalityStore model.FinalityStore
headersSelectedChainStore model.HeadersSelectedChainStore
daaBlocksStore model.DAABlocksStore
}

// BuildBlock builds a block over the current state, with the transactions
@@ -75,34 +74,30 @@ func (s *consensus) ValidateTransactionAndPopulateWithConsensusData(transaction
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.transactionValidator.ValidateTransactionInIsolation(transaction)
if err != nil {
return err
}

err = s.consensusStateManager.PopulateTransactionWithUTXOEntries(stagingArea, transaction)
err = s.consensusStateManager.PopulateTransactionWithUTXOEntries(transaction)
if err != nil {
return err
}

virtualSelectedParentMedianTime, err := s.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash)
virtualSelectedParentMedianTime, err := s.pastMedianTimeManager.PastMedianTime(model.VirtualBlockHash)
if err != nil {
return err
}

return s.transactionValidator.ValidateTransactionInContextAndPopulateMassAndFee(
stagingArea, transaction, model.VirtualBlockHash, virtualSelectedParentMedianTime)
return s.transactionValidator.ValidateTransactionInContextAndPopulateMassAndFee(transaction,
model.VirtualBlockHash, virtualSelectedParentMedianTime)
}

func (s *consensus) GetBlock(blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

block, err := s.blockStore.Block(s.databaseContext, stagingArea, blockHash)
block, err := s.blockStore.Block(s.databaseContext, blockHash)
if err != nil {
if errors.Is(err, database.ErrNotFound) {
return nil, errors.Wrapf(err, "block %s does not exist", blockHash)

@@ -116,9 +111,7 @@ func (s *consensus) GetBlockHeader(blockHash *externalapi.DomainHash) (externala
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

blockHeader, err := s.blockHeaderStore.BlockHeader(s.databaseContext, stagingArea, blockHash)
blockHeader, err := s.blockHeaderStore.BlockHeader(s.databaseContext, blockHash)
if err != nil {
if errors.Is(err, database.ErrNotFound) {
return nil, errors.Wrapf(err, "block header %s does not exist", blockHash)
@@ -132,11 +125,9 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

blockInfo := &externalapi.BlockInfo{}

exists, err := s.blockStatusStore.Exists(s.databaseContext, stagingArea, blockHash)
exists, err := s.blockStatusStore.Exists(s.databaseContext, blockHash)
if err != nil {
return nil, err
}
@@ -145,7 +136,7 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
return blockInfo, nil
}

blockStatus, err := s.blockStatusStore.Get(s.databaseContext, stagingArea, blockHash)
blockStatus, err := s.blockStatusStore.Get(s.databaseContext, blockHash)
if err != nil {
return nil, err
}
@@ -156,7 +147,7 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
return blockInfo, nil
}

ghostdagData, err := s.ghostdagDataStore.Get(s.databaseContext, stagingArea, blockHash)
ghostdagData, err := s.ghostdagDataStore.Get(s.databaseContext, blockHash)
if err != nil {
return nil, err
}

@@ -166,74 +157,55 @@ func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalap
return blockInfo, nil
}

func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) (
parents []*externalapi.DomainHash, selectedParent *externalapi.DomainHash,
children []*externalapi.DomainHash, err error) {

s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

blockRelation, err := s.blockRelationStore.BlockRelation(s.databaseContext, stagingArea, blockHash)
func (s *consensus) GetBlockChildren(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
blockRelation, err := s.blockRelationStore.BlockRelation(s.databaseContext, blockHash)
if err != nil {
return nil, nil, nil, err
return nil, err
}

blockGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, stagingArea, blockHash)
if err != nil {
return nil, nil, nil, err
}

return blockRelation.Parents, blockGHOSTDAGData.SelectedParent(), blockRelation.Children, nil
return blockRelation.Children, nil
}

func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, blockHash)
err := s.validateBlockHashExists(blockHash)
if err != nil {
return nil, err
}

return s.acceptanceDataStore.Get(s.databaseContext, stagingArea, blockHash)
return s.acceptanceDataStore.Get(s.databaseContext, blockHash)
}

func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlueScoreDifference uint64) (
hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) {
func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash,
maxBlueScoreDifference uint64) ([]*externalapi.DomainHash, error) {

s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err = s.validateBlockHashExists(stagingArea, lowHash)
err := s.validateBlockHashExists(lowHash)
if err != nil {
return nil, nil, err
return nil, err
}
err = s.validateBlockHashExists(stagingArea, highHash)
err = s.validateBlockHashExists(highHash)
if err != nil {
return nil, nil, err
return nil, err
}

return s.syncManager.GetHashesBetween(stagingArea, lowHash, highHash, maxBlueScoreDifference)
return s.syncManager.GetHashesBetween(lowHash, highHash, maxBlueScoreDifference)
}

func (s *consensus) GetMissingBlockBodyHashes(highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, highHash)
err := s.validateBlockHashExists(highHash)
if err != nil {
return nil, err
}

return s.syncManager.GetMissingBlockBodyHashes(stagingArea, highHash)
return s.syncManager.GetMissingBlockBodyHashes(highHash)
}

func (s *consensus) GetPruningPointUTXOs(expectedPruningPointHash *externalapi.DomainHash,

@@ -242,9 +214,7 @@ func (s *consensus) GetPruningPointUTXOs(expectedPruningPointHash *externalapi.D
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

pruningPointHash, err := s.pruningStore.PruningPoint(s.databaseContext, stagingArea)
pruningPointHash, err := s.pruningStore.PruningPoint(s.databaseContext)
if err != nil {
return nil, err
}
@@ -268,9 +238,7 @@ func (s *consensus) GetVirtualUTXOs(expectedVirtualParents []*externalapi.Domain
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

virtualParents, err := s.dagTopologyManager.Parents(stagingArea, model.VirtualBlockHash)
virtualParents, err := s.dagTopologyManager.Parents(model.VirtualBlockHash)
if err != nil {
return nil, err
}
@@ -292,9 +260,7 @@ func (s *consensus) PruningPoint() (*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

return s.pruningStore.PruningPoint(s.databaseContext, stagingArea)
return s.pruningStore.PruningPoint(s.databaseContext)
}

func (s *consensus) ClearImportedPruningPointData() error {
@@ -322,9 +288,7 @@ func (s *consensus) GetVirtualSelectedParent() (*externalapi.DomainHash, error)
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

virtualGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, stagingArea, model.VirtualBlockHash)
virtualGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err
}

@@ -335,30 +299,26 @@ func (s *consensus) Tips() ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

return s.consensusStateStore.Tips(stagingArea, s.databaseContext)
return s.consensusStateStore.Tips(s.databaseContext)
}

func (s *consensus) GetVirtualInfo() (*externalapi.VirtualInfo, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

blockRelations, err := s.blockRelationStore.BlockRelation(s.databaseContext, stagingArea, model.VirtualBlockHash)
blockRelations, err := s.blockRelationStore.BlockRelation(s.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err
}
bits, err := s.difficultyManager.RequiredDifficulty(stagingArea, model.VirtualBlockHash)
bits, err := s.difficultyManager.RequiredDifficulty(model.VirtualBlockHash)
if err != nil {
return nil, err
}
pastMedianTime, err := s.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash)
pastMedianTime, err := s.pastMedianTimeManager.PastMedianTime(model.VirtualBlockHash)
if err != nil {
return nil, err
}
virtualGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, stagingArea, model.VirtualBlockHash)
virtualGHOSTDAGData, err := s.ghostdagDataStore.Get(s.databaseContext, model.VirtualBlockHash)
if err != nil {
return nil, err
}

@@ -375,87 +335,76 @@ func (s *consensus) CreateBlockLocator(lowHash, highHash *externalapi.DomainHash
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, lowHash)
err := s.validateBlockHashExists(lowHash)
if err != nil {
return nil, err
}
err = s.validateBlockHashExists(stagingArea, highHash)
err = s.validateBlockHashExists(highHash)
if err != nil {
return nil, err
}

return s.syncManager.CreateBlockLocator(stagingArea, lowHash, highHash, limit)
return s.syncManager.CreateBlockLocator(lowHash, highHash, limit)
}

func (s *consensus) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

lowHash, err := s.pruningStore.PruningPoint(s.databaseContext, stagingArea)
lowHash, err := s.pruningStore.PruningPoint(s.databaseContext)
if err != nil {
return nil, err
}

highHash, err := s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext, stagingArea)
highHash, err := s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext)
if err != nil {
return nil, err
}

return s.syncManager.CreateHeadersSelectedChainBlockLocator(stagingArea, lowHash, highHash)
return s.syncManager.CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
}

func (s *consensus) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
func (s *consensus) CreateHeadersSelectedChainBlockLocator(lowHash,
highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

return s.syncManager.CreateHeadersSelectedChainBlockLocator(stagingArea, lowHash, highHash)
return s.syncManager.CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
}

func (s *consensus) GetSyncInfo() (*externalapi.SyncInfo, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

return s.syncManager.GetSyncInfo(stagingArea)
return s.syncManager.GetSyncInfo()
}

func (s *consensus) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, blockHash)
err := s.validateBlockHashExists(blockHash)
if err != nil {
return false, err
}

return s.pruningManager.IsValidPruningPoint(stagingArea, blockHash)
return s.pruningManager.IsValidPruningPoint(blockHash)
}

func (s *consensus) GetVirtualSelectedParentChainFromBlock(blockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, blockHash)
err := s.validateBlockHashExists(blockHash)
if err != nil {
return nil, err
}

return s.consensusStateManager.GetVirtualSelectedParentChainFromBlock(stagingArea, blockHash)
return s.consensusStateManager.GetVirtualSelectedParentChainFromBlock(blockHash)
}

func (s *consensus) validateBlockHashExists(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
exists, err := s.blockStatusStore.Exists(s.databaseContext, stagingArea, blockHash)
func (s *consensus) validateBlockHashExists(blockHash *externalapi.DomainHash) error {
exists, err := s.blockStatusStore.Exists(s.databaseContext, blockHash)
if err != nil {
return err
}

@@ -469,39 +418,33 @@ func (s *consensus) IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, blockHashA)
err := s.validateBlockHashExists(blockHashA)
if err != nil {
return false, err
}
err = s.validateBlockHashExists(stagingArea, blockHashB)
err = s.validateBlockHashExists(blockHashB)
if err != nil {
return false, err
}

return s.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, blockHashA, blockHashB)
return s.dagTopologyManager.IsInSelectedParentChainOf(blockHashA, blockHashB)
}

func (s *consensus) GetHeadersSelectedTip() (*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

return s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext, stagingArea)
return s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext)
}

func (s *consensus) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
s.lock.Lock()
defer s.lock.Unlock()

stagingArea := model.NewStagingArea()

err := s.validateBlockHashExists(stagingArea, blockHash)
err := s.validateBlockHashExists(blockHash)
if err != nil {
return nil, err
}

return s.dagTraversalManager.Anticone(stagingArea, blockHash)
return s.dagTraversalManager.Anticone(blockHash)
}

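The recurring change in this file is that every external consensus call now opens a model.NewStagingArea() and threads it through the stores and managers it touches. A rough, self-contained sketch of that call pattern follows; the types here are illustrative placeholders, not the actual kaspad model package:

```go
package main

import "fmt"

// stagingArea is a stand-in for model.StagingArea: a scratch space that
// collects uncommitted store writes for the duration of one operation.
type stagingArea struct{ staged map[string]string }

func newStagingArea() *stagingArea { return &stagingArea{staged: map[string]string{}} }

// blockStore is a stand-in for a consensus data store that consults the
// staging area first and falls back to the committed state.
type blockStore struct{ committed map[string]string }

func (s *blockStore) Block(area *stagingArea, hash string) (string, bool) {
	if v, ok := area.staged[hash]; ok {
		return v, true
	}
	v, ok := s.committed[hash]
	return v, ok
}

func main() {
	store := &blockStore{committed: map[string]string{"abc": "block-abc"}}

	// Each top-level call creates its own staging area, mirroring the
	// `stagingArea := model.NewStagingArea()` lines added throughout the diff.
	area := newStagingArea()
	if block, ok := store.Block(area, "abc"); ok {
		fmt.Println("found:", block)
	}
}
```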
@@ -13,9 +13,9 @@ type GHOSTDAGManagerConstructor func(model.DBReader, model.DAGTopologyManager,

// DifficultyManagerConstructor is the function signature for a constructor of a type implementing model.DifficultyManager
type DifficultyManagerConstructor func(model.DBReader, model.GHOSTDAGManager, model.GHOSTDAGDataStore,
model.BlockHeaderStore, model.DAABlocksStore, model.DAGTopologyManager, model.DAGTraversalManager, *big.Int, int, bool, time.Duration,
model.BlockHeaderStore, model.DAGTopologyManager, model.DAGTraversalManager, *big.Int, int, bool, time.Duration,
*externalapi.DomainHash) model.DifficultyManager

// PastMedianTimeManagerConstructor is the function signature for a constructor of a type implementing model.PastMedianTimeManager
type PastMedianTimeManagerConstructor func(int, model.DBReader, model.DAGTraversalManager, model.BlockHeaderStore,
model.GHOSTDAGDataStore, *externalapi.DomainHash) model.PastMedianTimeManager
model.GHOSTDAGDataStore) model.PastMedianTimeManager

@@ -1,24 +0,0 @@
package binaryserialization

import (
"encoding/binary"
"github.com/pkg/errors"
)

const uint64Length = 8

// SerializeUint64 serializes a uint64
func SerializeUint64(value uint64) []byte {
var keyBytes [uint64Length]byte
binary.LittleEndian.PutUint64(keyBytes[:], value)
return keyBytes[:]
}

// DeserializeUint64 deserializes bytes to uint64
func DeserializeUint64(valueBytes []byte) (uint64, error) {
if len(valueBytes) != uint64Length {
return 0, errors.Errorf("the given value is %d bytes so it cannot be deserialized into uint64",
len(valueBytes))
}
return binary.LittleEndian.Uint64(valueBytes), nil
}

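For reference, the round-trip behaviour of the helpers removed above comes straight from encoding/binary; a minimal standalone equivalent (the example value is arbitrary):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Serialize: a uint64 always occupies exactly 8 little-endian bytes.
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], 1545960)

	// Deserialize: reading the same 8 bytes restores the original value.
	value := binary.LittleEndian.Uint64(buf[:])
	fmt.Println(value) // 1545960
}
```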
@@ -1,47 +1,13 @@
package binaryserialization

import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/pkg/errors"
)
import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// SerializeHash serializes hash to a slice of bytes
func SerializeHash(hash *externalapi.DomainHash) []byte {
return hash.ByteSlice()
}

// DeserializeHash deserializes a slice of bytes to a hash
// DeserializeHash a slice of bytes to a hash
func DeserializeHash(hashBytes []byte) (*externalapi.DomainHash, error) {
return externalapi.NewDomainHashFromByteSlice(hashBytes)
}

// SerializeHashes serializes a slice of hashes to a slice of bytes
func SerializeHashes(hashes []*externalapi.DomainHash) []byte {
buff := make([]byte, len(hashes)*externalapi.DomainHashSize)
for i, hash := range hashes {
copy(buff[externalapi.DomainHashSize*i:], hash.ByteSlice())
}

return buff
}

// DeserializeHashes deserializes a slice of bytes to a slice of hashes
func DeserializeHashes(hashesBytes []byte) ([]*externalapi.DomainHash, error) {
if len(hashesBytes)%externalapi.DomainHashSize != 0 {
return nil, errors.Errorf("The length of hashBytes is not divisible by externalapi.DomainHashSize (%d)",
externalapi.DomainHashSize)
}

numHashes := len(hashesBytes) / externalapi.DomainHashSize
hashes := make([]*externalapi.DomainHash, numHashes)
for i := 0; i < numHashes; i++ {
var err error
start := i * externalapi.DomainHashSize
end := i*externalapi.DomainHashSize + externalapi.DomainHashSize
hashes[i], err = externalapi.NewDomainHashFromByteSlice(hashesBytes[start:end])
if err != nil {
return nil, err
}
}
return hashes, nil
}

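The hash-slice helpers removed above rely only on the hashes having a fixed width, so the buffer can be split back at fixed offsets. A minimal sketch of the same packing layout, using a plain 32-byte array as a stand-in for externalapi.DomainHash:

```go
package main

import "fmt"

const hashSize = 32 // same fixed-width idea as externalapi.DomainHashSize

// packHashes concatenates fixed-width hashes; unpackHashes splits the buffer
// back at hashSize boundaries, mirroring SerializeHashes/DeserializeHashes.
func packHashes(hashes [][hashSize]byte) []byte {
	buff := make([]byte, len(hashes)*hashSize)
	for i, hash := range hashes {
		copy(buff[i*hashSize:], hash[:])
	}
	return buff
}

func unpackHashes(buff []byte) ([][hashSize]byte, error) {
	if len(buff)%hashSize != 0 {
		return nil, fmt.Errorf("length %d is not a multiple of %d", len(buff), hashSize)
	}
	hashes := make([][hashSize]byte, len(buff)/hashSize)
	for i := range hashes {
		copy(hashes[i][:], buff[i*hashSize:(i+1)*hashSize])
	}
	return hashes, nil
}

func main() {
	a, b := [hashSize]byte{1}, [hashSize]byte{2}
	packed := packHashes([][hashSize]byte{a, b})
	unpacked, _ := unpackHashes(packed)
	fmt.Println(len(packed), len(unpacked)) // 64 2
}
```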
@@ -0,0 +1,15 @@
package binaryserialization

import "encoding/binary"

// SerializeChainBlockIndex serializes chain block index
func SerializeChainBlockIndex(index uint64) []byte {
var keyBytes [8]byte
binary.LittleEndian.PutUint64(keyBytes[:], index)
return keyBytes[:]
}

// DeserializeChainBlockIndex deserializes chain block index to uint64
func DeserializeChainBlockIndex(indexBytes []byte) uint64 {
return binary.LittleEndian.Uint64(indexBytes)
}

@@ -241,6 +241,7 @@ type DbTransaction struct {
LockTime uint64 `protobuf:"varint,4,opt,name=lockTime,proto3" json:"lockTime,omitempty"`
SubnetworkID *DbSubnetworkId `protobuf:"bytes,5,opt,name=subnetworkID,proto3" json:"subnetworkID,omitempty"`
Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"`
PayloadHash *DbHash `protobuf:"bytes,7,opt,name=payloadHash,proto3" json:"payloadHash,omitempty"`
Payload []byte `protobuf:"bytes,8,opt,name=payload,proto3" json:"payload,omitempty"`
}

@@ -318,6 +319,13 @@ func (x *DbTransaction) GetGas() uint64 {
return 0
}

func (x *DbTransaction) GetPayloadHash() *DbHash {
if x != nil {
return x.PayloadHash
}
return nil
}

func (x *DbTransaction) GetPayload() []byte {
if x != nil {
return x.Payload
@@ -1220,7 +1228,7 @@ type DbUtxoEntry struct {

Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"`
ScriptPublicKey *DbScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"`
BlockDaaScore uint64 `protobuf:"varint,3,opt,name=blockDaaScore,proto3" json:"blockDaaScore,omitempty"`
BlockBlueScore uint64 `protobuf:"varint,3,opt,name=blockBlueScore,proto3" json:"blockBlueScore,omitempty"`
IsCoinbase bool `protobuf:"varint,4,opt,name=isCoinbase,proto3" json:"isCoinbase,omitempty"`
}

@@ -1270,9 +1278,9 @@ func (x *DbUtxoEntry) GetScriptPublicKey() *DbScriptPublicKey {
return nil
}

func (x *DbUtxoEntry) GetBlockDaaScore() uint64 {
func (x *DbUtxoEntry) GetBlockBlueScore() uint64 {
if x != nil {
return x.BlockDaaScore
return x.BlockBlueScore
}
return 0
}

@@ -1645,7 +1653,7 @@ var file_dbobjects_proto_rawDesc = []byte{
0x28, 0x0d, 0x52, 0x04, 0x62, 0x69, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63,
0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x1c,
0x0a, 0x06, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0xad, 0x02, 0x0a,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0xe6, 0x02, 0x0a,
0x0d, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18,
0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75,

@@ -1663,184 +1671,188 @@ var file_dbobjects_proto_rawDesc = []byte{
|
||||
0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
|
||||
0x49, 0x64, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44,
|
||||
0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67,
|
||||
0x61, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xa1, 0x01, 0x0a,
|
||||
0x12, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
|
||||
0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f,
|
||||
0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
|
||||
0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62,
|
||||
0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f,
|
||||
0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x69,
|
||||
0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63,
|
||||
0x72, 0x69, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65,
|
||||
0x22, 0x68, 0x0a, 0x0a, 0x44, 0x62, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x44,
|
||||
0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
|
||||
0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73,
|
||||
0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
|
||||
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0b,
|
||||
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70,
|
||||
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61,
|
||||
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x12, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e,
|
||||
0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x10,
|
||||
0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69,
|
||||
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e,
|
||||
0x74, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f,
|
||||
0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
|
||||
0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x69,
|
||||
0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x1a, 0x0a,
|
||||
0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52,
|
||||
0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x68, 0x0a, 0x0a, 0x44, 0x62, 0x4f,
|
||||
0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73,
|
||||
0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
|
||||
0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44,
|
||||
0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x0d,
|
||||
0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x22, 0x37, 0x0a, 0x0f, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
|
||||
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74,
|
||||
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x77, 0x0a, 0x13,
|
||||
0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74,
|
||||
0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x73, 0x63, 0x72,
|
||||
0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69,
|
||||
0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c,
|
||||
0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x34, 0x0a, 0x0e, 0x44, 0x62, 0x53, 0x75, 0x62, 0x6e, 0x65,
|
||||
0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65,
|
||||
0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73,
|
||||
0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x22, 0x6a, 0x0a, 0x10, 0x44,
|
||||
0x62, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12,
|
||||
0x56, 0x0a, 0x13, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e,
|
||||
0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x42,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61,
|
||||
0x74, 0x61, 0x52, 0x13, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61,
|
||||
0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb6, 0x01, 0x0a, 0x15, 0x44, 0x62, 0x42, 0x6c,
|
||||
0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74,
|
||||
0x61, 0x12, 0x68, 0x0a, 0x19, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61,
|
||||
0x52, 0x19, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63,
|
||||
0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x09, 0x62,
|
||||
0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
|
||||
0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44,
|
||||
0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68,
|
||||
0x22, 0xed, 0x01, 0x0a, 0x1b, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61,
|
||||
0x12, 0x3e, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x37, 0x0a, 0x0f, 0x44, 0x62,
|
||||
0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x24, 0x0a,
|
||||
0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x49, 0x64, 0x22, 0x77, 0x0a, 0x13, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x4a, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63,
|
||||
0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x65, 0x72, 0x69,
|
||||
0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x53, 0x63, 0x72, 0x69,
|
||||
0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72,
|
||||
0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x34, 0x0a, 0x0e,
|
||||
0x44, 0x62, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x22,
|
||||
0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
|
||||
0x49, 0x64, 0x22, 0x6a, 0x0a, 0x10, 0x44, 0x62, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e,
|
||||
0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x56, 0x0a, 0x13, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41,
|
||||
	// ... remaining file_dbobjects_proto_rawDesc byte literals (the generated
	// descriptor encoding of dbobjects.proto) elided; the readable message
	// definitions appear in the dbobjects.proto hunks below ...
}

var (
@@ -1895,37 +1907,38 @@ var file_dbobjects_proto_depIdxs = []int32{
	4,  // 6: serialization.DbTransaction.inputs:type_name -> serialization.DbTransactionInput
	7,  // 7: serialization.DbTransaction.outputs:type_name -> serialization.DbTransactionOutput
	8,  // 8: serialization.DbTransaction.subnetworkID:type_name -> serialization.DbSubnetworkId
	5,  // 9: serialization.DbTransactionInput.previousOutpoint:type_name -> serialization.DbOutpoint
	6,  // 10: serialization.DbOutpoint.transactionID:type_name -> serialization.DbTransactionId
	19, // 11: serialization.DbTransactionOutput.scriptPublicKey:type_name -> serialization.DbScriptPublicKey
	10, // 12: serialization.DbAcceptanceData.blockAcceptanceData:type_name -> serialization.DbBlockAcceptanceData
	11, // 13: serialization.DbBlockAcceptanceData.transactionAcceptanceData:type_name -> serialization.DbTransactionAcceptanceData
	2,  // 14: serialization.DbBlockAcceptanceData.blockHash:type_name -> serialization.DbHash
	3,  // 15: serialization.DbTransactionAcceptanceData.transaction:type_name -> serialization.DbTransaction
	20, // 16: serialization.DbTransactionAcceptanceData.transactionInputUtxoEntries:type_name -> serialization.DbUtxoEntry
	2,  // 17: serialization.DbBlockRelations.parents:type_name -> serialization.DbHash
	2,  // 18: serialization.DbBlockRelations.children:type_name -> serialization.DbHash
	2,  // 19: serialization.DbBlockGhostdagData.selectedParent:type_name -> serialization.DbHash
	2,  // 20: serialization.DbBlockGhostdagData.mergeSetBlues:type_name -> serialization.DbHash
	2,  // 21: serialization.DbBlockGhostdagData.mergeSetReds:type_name -> serialization.DbHash
	15, // 22: serialization.DbBlockGhostdagData.bluesAnticoneSizes:type_name -> serialization.DbBluesAnticoneSizes
	2,  // 23: serialization.DbBluesAnticoneSizes.blueHash:type_name -> serialization.DbHash
	18, // 24: serialization.DbUtxoSet.items:type_name -> serialization.DbUtxoCollectionItem
	5,  // 25: serialization.DbUtxoCollectionItem.outpoint:type_name -> serialization.DbOutpoint
	20, // 26: serialization.DbUtxoCollectionItem.utxoEntry:type_name -> serialization.DbUtxoEntry
	19, // 27: serialization.DbUtxoEntry.scriptPublicKey:type_name -> serialization.DbScriptPublicKey
	2,  // 28: serialization.DbReachabilityData.children:type_name -> serialization.DbHash
	2,  // 29: serialization.DbReachabilityData.parent:type_name -> serialization.DbHash
	22, // 30: serialization.DbReachabilityData.interval:type_name -> serialization.DbReachabilityInterval
	2,  // 31: serialization.DbReachabilityData.futureCoveringSet:type_name -> serialization.DbHash
	18, // 32: serialization.DbUtxoDiff.toAdd:type_name -> serialization.DbUtxoCollectionItem
	18, // 33: serialization.DbUtxoDiff.toRemove:type_name -> serialization.DbUtxoCollectionItem
	2,  // 34: serialization.DbTips.tips:type_name -> serialization.DbHash
	35, // [35:35] is the sub-list for method output_type
	35, // [35:35] is the sub-list for method input_type
	35, // [35:35] is the sub-list for extension type_name
	35, // [35:35] is the sub-list for extension extendee
	0,  // [0:35] is the sub-list for field type_name
	2,  // 9: serialization.DbTransaction.payloadHash:type_name -> serialization.DbHash
	5,  // 10: serialization.DbTransactionInput.previousOutpoint:type_name -> serialization.DbOutpoint
	6,  // 11: serialization.DbOutpoint.transactionID:type_name -> serialization.DbTransactionId
	19, // 12: serialization.DbTransactionOutput.scriptPublicKey:type_name -> serialization.DbScriptPublicKey
	10, // 13: serialization.DbAcceptanceData.blockAcceptanceData:type_name -> serialization.DbBlockAcceptanceData
	11, // 14: serialization.DbBlockAcceptanceData.transactionAcceptanceData:type_name -> serialization.DbTransactionAcceptanceData
	2,  // 15: serialization.DbBlockAcceptanceData.blockHash:type_name -> serialization.DbHash
	3,  // 16: serialization.DbTransactionAcceptanceData.transaction:type_name -> serialization.DbTransaction
	20, // 17: serialization.DbTransactionAcceptanceData.transactionInputUtxoEntries:type_name -> serialization.DbUtxoEntry
	2,  // 18: serialization.DbBlockRelations.parents:type_name -> serialization.DbHash
	2,  // 19: serialization.DbBlockRelations.children:type_name -> serialization.DbHash
	2,  // 20: serialization.DbBlockGhostdagData.selectedParent:type_name -> serialization.DbHash
	2,  // 21: serialization.DbBlockGhostdagData.mergeSetBlues:type_name -> serialization.DbHash
	2,  // 22: serialization.DbBlockGhostdagData.mergeSetReds:type_name -> serialization.DbHash
	15, // 23: serialization.DbBlockGhostdagData.bluesAnticoneSizes:type_name -> serialization.DbBluesAnticoneSizes
	2,  // 24: serialization.DbBluesAnticoneSizes.blueHash:type_name -> serialization.DbHash
	18, // 25: serialization.DbUtxoSet.items:type_name -> serialization.DbUtxoCollectionItem
	5,  // 26: serialization.DbUtxoCollectionItem.outpoint:type_name -> serialization.DbOutpoint
	20, // 27: serialization.DbUtxoCollectionItem.utxoEntry:type_name -> serialization.DbUtxoEntry
	19, // 28: serialization.DbUtxoEntry.scriptPublicKey:type_name -> serialization.DbScriptPublicKey
	2,  // 29: serialization.DbReachabilityData.children:type_name -> serialization.DbHash
	2,  // 30: serialization.DbReachabilityData.parent:type_name -> serialization.DbHash
	22, // 31: serialization.DbReachabilityData.interval:type_name -> serialization.DbReachabilityInterval
	2,  // 32: serialization.DbReachabilityData.futureCoveringSet:type_name -> serialization.DbHash
	18, // 33: serialization.DbUtxoDiff.toAdd:type_name -> serialization.DbUtxoCollectionItem
	18, // 34: serialization.DbUtxoDiff.toRemove:type_name -> serialization.DbUtxoCollectionItem
	2,  // 35: serialization.DbTips.tips:type_name -> serialization.DbHash
	36, // [36:36] is the sub-list for method output_type
	36, // [36:36] is the sub-list for method input_type
	36, // [36:36] is the sub-list for extension type_name
	36, // [36:36] is the sub-list for extension extendee
	0,  // [0:36] is the sub-list for field type_name
}

func init() { file_dbobjects_proto_init() }

@@ -30,6 +30,7 @@ message DbTransaction {
	uint64 lockTime = 4;
	DbSubnetworkId subnetworkID = 5;
	uint64 gas = 6;
	DbHash payloadHash = 7;
	bytes payload = 8;
}

@@ -117,7 +118,7 @@ message DbScriptPublicKey {
message DbUtxoEntry {
	uint64 amount = 1;
	DbScriptPublicKey scriptPublicKey = 2;
	uint64 blockDaaScore = 3;
	uint64 blockBlueScore = 3;
	bool isCoinbase = 4;
}

@@ -33,6 +33,7 @@ func DomainTransactionToDbTransaction(domainTransaction *externalapi.DomainTrans
		LockTime:     domainTransaction.LockTime,
		SubnetworkID: DomainSubnetworkIDToDbSubnetworkID(&domainTransaction.SubnetworkID),
		Gas:          domainTransaction.Gas,
		PayloadHash:  DomainHashToDbHash(&domainTransaction.PayloadHash),
		Payload:      domainTransaction.Payload,
	}
}
@@ -43,6 +44,10 @@ func DbTransactionToDomainTransaction(dbTransaction *DbTransaction) (*externalap
	if err != nil {
		return nil, err
	}
	domainPayloadHash, err := DbHashToDomainHash(dbTransaction.PayloadHash)
	if err != nil {
		return nil, err
	}

	domainInputs := make([]*externalapi.DomainTransactionInput, len(dbTransaction.Inputs))
	for i, dbTransactionInput := range dbTransaction.Inputs {
@@ -79,6 +84,7 @@ func DbTransactionToDomainTransaction(dbTransaction *DbTransaction) (*externalap
		LockTime:     dbTransaction.LockTime,
		SubnetworkID: *domainSubnetworkID,
		Gas:          dbTransaction.Gas,
		PayloadHash:  *domainPayloadHash,
		Payload:      dbTransaction.Payload,
	}, nil
}

@@ -26,7 +26,7 @@ func UTXOEntryToDBUTXOEntry(utxoEntry externalapi.UTXOEntry) *DbUtxoEntry {
	return &DbUtxoEntry{
		Amount:          utxoEntry.Amount(),
		ScriptPublicKey: dbScriptPublicKey,
		BlockDaaScore:   utxoEntry.BlockDAAScore(),
		BlockBlueScore:  utxoEntry.BlockBlueScore(),
		IsCoinbase:      utxoEntry.IsCoinbase(),
	}
}
@@ -37,5 +37,5 @@ func DBUTXOEntryToUTXOEntry(dbUtxoEntry *DbUtxoEntry) (externalapi.UTXOEntry, er
	if err != nil {
		return nil, err
	}
	return utxo.NewUTXOEntry(dbUtxoEntry.Amount, scriptPublicKey, dbUtxoEntry.IsCoinbase, dbUtxoEntry.BlockDaaScore), nil
	return utxo.NewUTXOEntry(dbUtxoEntry.Amount, scriptPublicKey, dbUtxoEntry.IsCoinbase, dbUtxoEntry.BlockBlueScore), nil
}

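The two converter hunks above pair up: UTXOEntryToDBUTXOEntry copies a domain UTXO entry into its protobuf form, and DBUTXOEntryToUTXOEntry rebuilds it through utxo.NewUTXOEntry. A minimal round-trip sketch follows; the import paths, the ScriptPublicKey fields (Script/Version), and the meaning of the last NewUTXOEntry argument (DAA score or blue score, depending on which side of this diff is built) are assumptions, not guarantees:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
)

func main() {
	// Hypothetical script public key; the field names here are assumptions.
	scriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{0x51}, Version: 0}

	// Build a domain entry, convert it to its DB representation and back.
	entry := utxo.NewUTXOEntry(100_000, scriptPublicKey, false, 12345)
	dbEntry := serialization.UTXOEntryToDBUTXOEntry(entry)

	roundTripped, err := serialization.DBUTXOEntryToUTXOEntry(dbEntry)
	if err != nil {
		panic(err)
	}
	// Amount, script, coinbase flag and score should survive the round trip.
	fmt.Println(roundTripped.Amount(), roundTripped.IsCoinbase())
}
```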
@@ -1,50 +0,0 @@
package acceptancedatastore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type acceptanceDataStagingShard struct {
	store    *acceptanceDataStore
	toAdd    map[externalapi.DomainHash]externalapi.AcceptanceData
	toDelete map[externalapi.DomainHash]struct{}
}

func (ads *acceptanceDataStore) stagingShard(stagingArea *model.StagingArea) *acceptanceDataStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDAcceptanceData, func() model.StagingShard {
		return &acceptanceDataStagingShard{
			store:    ads,
			toAdd:    make(map[externalapi.DomainHash]externalapi.AcceptanceData),
			toDelete: make(map[externalapi.DomainHash]struct{}),
		}
	}).(*acceptanceDataStagingShard)
}

func (adss *acceptanceDataStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, acceptanceData := range adss.toAdd {
		acceptanceDataBytes, err := adss.store.serializeAcceptanceData(acceptanceData)
		if err != nil {
			return err
		}
		err = dbTx.Put(adss.store.hashAsKey(&hash), acceptanceDataBytes)
		if err != nil {
			return err
		}
		adss.store.cache.Add(&hash, acceptanceData)
	}

	for hash := range adss.toDelete {
		err := dbTx.Delete(adss.store.hashAsKey(&hash))
		if err != nil {
			return err
		}
		adss.store.cache.Remove(&hash)
	}

	return nil
}

func (adss *acceptanceDataStagingShard) isStaged() bool {
	return len(adss.toAdd) != 0 || len(adss.toDelete) != 0
}
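The file removed above is representative of the staging-shard pattern this comparison strips from every datastore: instead of each store holding its own staging/toDelete maps, pending writes live in a per-StagingArea shard that the area later commits inside a single database transaction. A compact sketch of that pattern is given below, using only the shapes visible in the deleted code; the []byte value type, the injected hashAsKey helper, and the reuse of model.StagingShardIDAcceptanceData are simplifications for illustration, not how a real store would be wired up:

```go
package exampledatastore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type exampleStore struct {
	// hashAsKey is injected so this sketch does not depend on a concrete
	// database bucket; the real stores build keys from their own buckets.
	hashAsKey func(*externalapi.DomainHash) model.DBKey
}

type exampleStagingShard struct {
	store *exampleStore
	toAdd map[externalapi.DomainHash][]byte
}

// stagingShard lazily registers this store's shard in the staging area.
// model.StagingShardIDAcceptanceData is reused purely for illustration;
// a real store declares its own shard ID in the model package.
func (es *exampleStore) stagingShard(stagingArea *model.StagingArea) *exampleStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDAcceptanceData, func() model.StagingShard {
		return &exampleStagingShard{
			store: es,
			toAdd: make(map[externalapi.DomainHash][]byte),
		}
	}).(*exampleStagingShard)
}

// Stage records a pending write in the shard rather than in the store itself,
// so independent staging areas never share mutable state.
func (es *exampleStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, data []byte) {
	stagingShard := es.stagingShard(stagingArea)
	stagingShard.toAdd[*blockHash] = data
}

// Commit is invoked by the staging area while it flushes all of its shards
// inside one database transaction.
func (ess *exampleStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, data := range ess.toAdd {
		hash := hash // take the address of a stable copy of the loop variable
		err := dbTx.Put(ess.store.hashAsKey(&hash), data)
		if err != nil {
			return err
		}
	}
	return nil
}

func (ess *exampleStagingShard) isStaged() bool {
	return len(ess.toAdd) != 0
}
```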
@@ -13,31 +13,62 @@ var bucket = database.MakeBucket([]byte("acceptance-data"))

// acceptanceDataStore represents a store of AcceptanceData
type acceptanceDataStore struct {
	cache *lrucache.LRUCache
	staging  map[externalapi.DomainHash]externalapi.AcceptanceData
	toDelete map[externalapi.DomainHash]struct{}
	cache    *lrucache.LRUCache
}

// New instantiates a new AcceptanceDataStore
func New(cacheSize int, preallocate bool) model.AcceptanceDataStore {
	return &acceptanceDataStore{
		cache: lrucache.New(cacheSize, preallocate),
		staging:  make(map[externalapi.DomainHash]externalapi.AcceptanceData),
		toDelete: make(map[externalapi.DomainHash]struct{}),
		cache:    lrucache.New(cacheSize, preallocate),
	}
}

// Stage stages the given acceptanceData for the given blockHash
func (ads *acceptanceDataStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData) {
	stagingShard := ads.stagingShard(stagingArea)
	stagingShard.toAdd[*blockHash] = acceptanceData.Clone()
func (ads *acceptanceDataStore) Stage(blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData) {
	ads.staging[*blockHash] = acceptanceData.Clone()
}

func (ads *acceptanceDataStore) IsStaged(stagingArea *model.StagingArea) bool {
	return ads.stagingShard(stagingArea).isStaged()
func (ads *acceptanceDataStore) IsStaged() bool {
	return len(ads.staging) != 0 || len(ads.toDelete) != 0
}

func (ads *acceptanceDataStore) Discard() {
	ads.staging = make(map[externalapi.DomainHash]externalapi.AcceptanceData)
	ads.toDelete = make(map[externalapi.DomainHash]struct{})
}

func (ads *acceptanceDataStore) Commit(dbTx model.DBTransaction) error {
	for hash, acceptanceData := range ads.staging {
		acceptanceDataBytes, err := ads.serializeAcceptanceData(acceptanceData)
		if err != nil {
			return err
		}
		err = dbTx.Put(ads.hashAsKey(&hash), acceptanceDataBytes)
		if err != nil {
			return err
		}
		ads.cache.Add(&hash, acceptanceData)
	}

	for hash := range ads.toDelete {
		err := dbTx.Delete(ads.hashAsKey(&hash))
		if err != nil {
			return err
		}
		ads.cache.Remove(&hash)
	}

	ads.Discard()
	return nil
}

// Get gets the acceptanceData associated with the given blockHash
func (ads *acceptanceDataStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
	stagingShard := ads.stagingShard(stagingArea)

	if acceptanceData, ok := stagingShard.toAdd[*blockHash]; ok {
func (ads *acceptanceDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) {
	if acceptanceData, ok := ads.staging[*blockHash]; ok {
		return acceptanceData.Clone(), nil
	}

@@ -59,14 +90,12 @@ func (ads *acceptanceDataStore) Get(dbContext model.DBReader, stagingArea *model
}

// Delete deletes the acceptanceData associated with the given blockHash
func (ads *acceptanceDataStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
	stagingShard := ads.stagingShard(stagingArea)

	if _, ok := stagingShard.toAdd[*blockHash]; ok {
		delete(stagingShard.toAdd, *blockHash)
func (ads *acceptanceDataStore) Delete(blockHash *externalapi.DomainHash) {
	if _, ok := ads.staging[*blockHash]; ok {
		delete(ads.staging, *blockHash)
		return
	}
	stagingShard.toDelete[*blockHash] = struct{}{}
	ads.toDelete[*blockHash] = struct{}{}
}

func (ads *acceptanceDataStore) serializeAcceptanceData(acceptanceData externalapi.AcceptanceData) ([]byte, error) {
@@ -1,69 +0,0 @@
package blockheaderstore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type blockHeaderStagingShard struct {
	store    *blockHeaderStore
	toAdd    map[externalapi.DomainHash]externalapi.BlockHeader
	toDelete map[externalapi.DomainHash]struct{}
}

func (bhs *blockHeaderStore) stagingShard(stagingArea *model.StagingArea) *blockHeaderStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockHeader, func() model.StagingShard {
		return &blockHeaderStagingShard{
			store:    bhs,
			toAdd:    make(map[externalapi.DomainHash]externalapi.BlockHeader),
			toDelete: make(map[externalapi.DomainHash]struct{}),
		}
	}).(*blockHeaderStagingShard)
}

func (bhss *blockHeaderStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, header := range bhss.toAdd {
		headerBytes, err := bhss.store.serializeHeader(header)
		if err != nil {
			return err
		}
		err = dbTx.Put(bhss.store.hashAsKey(&hash), headerBytes)
		if err != nil {
			return err
		}
		bhss.store.cache.Add(&hash, header)
	}

	for hash := range bhss.toDelete {
		err := dbTx.Delete(bhss.store.hashAsKey(&hash))
		if err != nil {
			return err
		}
		bhss.store.cache.Remove(&hash)
	}

	err := bhss.commitCount(dbTx)
	if err != nil {
		return err
	}

	return nil
}

func (bhss *blockHeaderStagingShard) commitCount(dbTx model.DBTransaction) error {
	count := bhss.store.count(bhss)
	countBytes, err := bhss.store.serializeHeaderCount(count)
	if err != nil {
		return err
	}
	err = dbTx.Put(countKey, countBytes)
	if err != nil {
		return err
	}
	bhss.store.countCached = count
	return nil
}

func (bhss *blockHeaderStagingShard) isStaged() bool {
	return len(bhss.toAdd) != 0 || len(bhss.toDelete) != 0
}
@@ -14,14 +14,18 @@ var countKey = database.MakeBucket(nil).Key([]byte("block-headers-count"))

// blockHeaderStore represents a store of blocks
type blockHeaderStore struct {
	cache       *lrucache.LRUCache
	countCached uint64
	staging  map[externalapi.DomainHash]externalapi.BlockHeader
	toDelete map[externalapi.DomainHash]struct{}
	cache    *lrucache.LRUCache
	count    uint64
}

// New instantiates a new BlockHeaderStore
func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) {
	blockHeaderStore := &blockHeaderStore{
		cache: lrucache.New(cacheSize, preallocate),
		staging:  make(map[externalapi.DomainHash]externalapi.BlockHeader),
		toDelete: make(map[externalapi.DomainHash]struct{}),
		cache:    lrucache.New(cacheSize, preallocate),
	}

	err := blockHeaderStore.initializeCount(dbContext)
@@ -48,33 +52,57 @@ func (bhs *blockHeaderStore) initializeCount(dbContext model.DBReader) error {
			return err
		}
	}
	bhs.countCached = count
	bhs.count = count
	return nil
}

// Stage stages the given block header for the given blockHash
func (bhs *blockHeaderStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockHeader externalapi.BlockHeader) {
	stagingShard := bhs.stagingShard(stagingArea)
	stagingShard.toAdd[*blockHash] = blockHeader
func (bhs *blockHeaderStore) Stage(blockHash *externalapi.DomainHash, blockHeader externalapi.BlockHeader) {
	bhs.staging[*blockHash] = blockHeader
}

func (bhs *blockHeaderStore) IsStaged(stagingArea *model.StagingArea) bool {
	return bhs.stagingShard(stagingArea).isStaged()
func (bhs *blockHeaderStore) IsStaged() bool {
	return len(bhs.staging) != 0 || len(bhs.toDelete) != 0
}

func (bhs *blockHeaderStore) Discard() {
	bhs.staging = make(map[externalapi.DomainHash]externalapi.BlockHeader)
	bhs.toDelete = make(map[externalapi.DomainHash]struct{})
}

func (bhs *blockHeaderStore) Commit(dbTx model.DBTransaction) error {
	for hash, header := range bhs.staging {
		headerBytes, err := bhs.serializeHeader(header)
		if err != nil {
			return err
		}
		err = dbTx.Put(bhs.hashAsKey(&hash), headerBytes)
		if err != nil {
			return err
		}
		bhs.cache.Add(&hash, header)
	}

	for hash := range bhs.toDelete {
		err := dbTx.Delete(bhs.hashAsKey(&hash))
		if err != nil {
			return err
		}
		bhs.cache.Remove(&hash)
	}

	err := bhs.commitCount(dbTx)
	if err != nil {
		return err
	}

	bhs.Discard()
	return nil
}

// BlockHeader gets the block header associated with the given blockHash
func (bhs *blockHeaderStore) BlockHeader(dbContext model.DBReader, stagingArea *model.StagingArea,
	blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) {

	stagingShard := bhs.stagingShard(stagingArea)

	return bhs.blockHeader(dbContext, stagingShard, blockHash)
}

func (bhs *blockHeaderStore) blockHeader(dbContext model.DBReader, stagingShard *blockHeaderStagingShard,
	blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) {

	if header, ok := stagingShard.toAdd[*blockHash]; ok {
func (bhs *blockHeaderStore) BlockHeader(dbContext model.DBReader, blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) {
	if header, ok := bhs.staging[*blockHash]; ok {
		return header, nil
	}

@@ -96,10 +124,8 @@ func (bhs *blockHeaderStore) blockHeader(dbContext model.DBReader, stagingShard
}

// HasBlock returns whether a block header with a given hash exists in the store.
func (bhs *blockHeaderStore) HasBlockHeader(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
	stagingShard := bhs.stagingShard(stagingArea)

	if _, ok := stagingShard.toAdd[*blockHash]; ok {
func (bhs *blockHeaderStore) HasBlockHeader(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {
	if _, ok := bhs.staging[*blockHash]; ok {
		return true, nil
	}

@@ -116,15 +142,11 @@ func (bhs *blockHeaderStore) HasBlockHeader(dbContext model.DBReader, stagingAre
}

// BlockHeaders gets the block headers associated with the given blockHashes
func (bhs *blockHeaderStore) BlockHeaders(dbContext model.DBReader, stagingArea *model.StagingArea,
	blockHashes []*externalapi.DomainHash) ([]externalapi.BlockHeader, error) {

	stagingShard := bhs.stagingShard(stagingArea)

func (bhs *blockHeaderStore) BlockHeaders(dbContext model.DBReader, blockHashes []*externalapi.DomainHash) ([]externalapi.BlockHeader, error) {
	headers := make([]externalapi.BlockHeader, len(blockHashes))
	for i, hash := range blockHashes {
		var err error
		headers[i], err = bhs.blockHeader(dbContext, stagingShard, hash)
		headers[i], err = bhs.BlockHeader(dbContext, hash)
		if err != nil {
			return nil, err
		}
@@ -133,14 +155,12 @@ func (bhs *blockHeaderStore) BlockHeaders(dbContext model.DBReader, stagingArea
}

// Delete deletes the block associated with the given blockHash
func (bhs *blockHeaderStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
	stagingShard := bhs.stagingShard(stagingArea)

	if _, ok := stagingShard.toAdd[*blockHash]; ok {
		delete(stagingShard.toAdd, *blockHash)
func (bhs *blockHeaderStore) Delete(blockHash *externalapi.DomainHash) {
	if _, ok := bhs.staging[*blockHash]; ok {
		delete(bhs.staging, *blockHash)
		return
	}
	stagingShard.toDelete[*blockHash] = struct{}{}
	bhs.toDelete[*blockHash] = struct{}{}
}

func (bhs *blockHeaderStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
@@ -161,14 +181,8 @@ func (bhs *blockHeaderStore) deserializeHeader(headerBytes []byte) (externalapi.
	return serialization.DbBlockHeaderToDomainBlockHeader(dbBlockHeader)
}

func (bhs *blockHeaderStore) Count(stagingArea *model.StagingArea) uint64 {
	stagingShard := bhs.stagingShard(stagingArea)

	return bhs.count(stagingShard)
}

func (bhs *blockHeaderStore) count(stagingShard *blockHeaderStagingShard) uint64 {
	return bhs.countCached + uint64(len(stagingShard.toAdd)) - uint64(len(stagingShard.toDelete))
func (bhs *blockHeaderStore) Count() uint64 {
	return bhs.count + uint64(len(bhs.staging)) - uint64(len(bhs.toDelete))
}

func (bhs *blockHeaderStore) deserializeHeaderCount(countBytes []byte) (uint64, error) {
@@ -180,6 +194,20 @@ func (bhs *blockHeaderStore) deserializeHeaderCount(countBytes []byte) (uint64,
	return dbBlockHeaderCount.Count, nil
}

func (bhs *blockHeaderStore) commitCount(dbTx model.DBTransaction) error {
	count := bhs.Count()
	countBytes, err := bhs.serializeHeaderCount(count)
	if err != nil {
		return err
	}
	err = dbTx.Put(countKey, countBytes)
	if err != nil {
		return err
	}
	bhs.count = count
	return nil
}

func (bhs *blockHeaderStore) serializeHeaderCount(count uint64) ([]byte, error) {
	dbBlockHeaderCount := &serialization.DbBlockHeaderCount{Count: count}
	return proto.Marshal(dbBlockHeaderCount)
@@ -1,40 +0,0 @@
package blockrelationstore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type blockRelationStagingShard struct {
	store *blockRelationStore
	toAdd map[externalapi.DomainHash]*model.BlockRelations
}

func (brs *blockRelationStore) stagingShard(stagingArea *model.StagingArea) *blockRelationStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockRelation, func() model.StagingShard {
		return &blockRelationStagingShard{
			store: brs,
			toAdd: make(map[externalapi.DomainHash]*model.BlockRelations),
		}
	}).(*blockRelationStagingShard)
}

func (brss *blockRelationStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, blockRelations := range brss.toAdd {
		blockRelationBytes, err := brss.store.serializeBlockRelations(blockRelations)
		if err != nil {
			return err
		}
		err = dbTx.Put(brss.store.hashAsKey(&hash), blockRelationBytes)
		if err != nil {
			return err
		}
		brss.store.cache.Add(&hash, blockRelations)
	}

	return nil
}

func (brss *blockRelationStagingShard) isStaged() bool {
	return len(brss.toAdd) != 0
}
@@ -13,30 +13,49 @@ var bucket = database.MakeBucket([]byte("block-relations"))

// blockRelationStore represents a store of BlockRelations
type blockRelationStore struct {
	cache *lrucache.LRUCache
	staging map[externalapi.DomainHash]*model.BlockRelations
	cache   *lrucache.LRUCache
}

// New instantiates a new BlockRelationStore
func New(cacheSize int, preallocate bool) model.BlockRelationStore {
	return &blockRelationStore{
		cache: lrucache.New(cacheSize, preallocate),
		staging: make(map[externalapi.DomainHash]*model.BlockRelations),
		cache:   lrucache.New(cacheSize, preallocate),
	}
}

func (brs *blockRelationStore) StageBlockRelation(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockRelations *model.BlockRelations) {
	stagingShard := brs.stagingShard(stagingArea)

	stagingShard.toAdd[*blockHash] = blockRelations.Clone()
func (brs *blockRelationStore) StageBlockRelation(blockHash *externalapi.DomainHash, blockRelations *model.BlockRelations) {
	brs.staging[*blockHash] = blockRelations.Clone()
}

func (brs *blockRelationStore) IsStaged(stagingArea *model.StagingArea) bool {
	return brs.stagingShard(stagingArea).isStaged()
func (brs *blockRelationStore) IsStaged() bool {
	return len(brs.staging) != 0
}

func (brs *blockRelationStore) BlockRelation(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*model.BlockRelations, error) {
	stagingShard := brs.stagingShard(stagingArea)
func (brs *blockRelationStore) Discard() {
	brs.staging = make(map[externalapi.DomainHash]*model.BlockRelations)
}

	if blockRelations, ok := stagingShard.toAdd[*blockHash]; ok {
func (brs *blockRelationStore) Commit(dbTx model.DBTransaction) error {
	for hash, blockRelations := range brs.staging {
		blockRelationBytes, err := brs.serializeBlockRelations(blockRelations)
		if err != nil {
			return err
		}
		err = dbTx.Put(brs.hashAsKey(&hash), blockRelationBytes)
		if err != nil {
			return err
		}
		brs.cache.Add(&hash, blockRelations)
	}

	brs.Discard()
	return nil
}

func (brs *blockRelationStore) BlockRelation(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockRelations, error) {
	if blockRelations, ok := brs.staging[*blockHash]; ok {
		return blockRelations.Clone(), nil
	}

@@ -57,10 +76,8 @@ func (brs *blockRelationStore) BlockRelation(dbContext model.DBReader, stagingAr
	return blockRelations.Clone(), nil
}

func (brs *blockRelationStore) Has(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
	stagingShard := brs.stagingShard(stagingArea)

	if _, ok := stagingShard.toAdd[*blockHash]; ok {
func (brs *blockRelationStore) Has(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {
	if _, ok := brs.staging[*blockHash]; ok {
		return true, nil
	}

@@ -1,40 +0,0 @@
package blockstatusstore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type blockStatusStagingShard struct {
	store *blockStatusStore
	toAdd map[externalapi.DomainHash]externalapi.BlockStatus
}

func (bss *blockStatusStore) stagingShard(stagingArea *model.StagingArea) *blockStatusStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDBlockStatus, func() model.StagingShard {
		return &blockStatusStagingShard{
			store: bss,
			toAdd: make(map[externalapi.DomainHash]externalapi.BlockStatus),
		}
	}).(*blockStatusStagingShard)
}

func (bsss *blockStatusStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, status := range bsss.toAdd {
		blockStatusBytes, err := bsss.store.serializeBlockStatus(status)
		if err != nil {
			return err
		}
		err = dbTx.Put(bsss.store.hashAsKey(&hash), blockStatusBytes)
		if err != nil {
			return err
		}
		bsss.store.cache.Add(&hash, status)
	}

	return nil
}

func (bsss *blockStatusStagingShard) isStaged() bool {
	return len(bsss.toAdd) != 0
}
@@ -13,31 +13,51 @@ var bucket = database.MakeBucket([]byte("block-statuses"))

// blockStatusStore represents a store of BlockStatuses
type blockStatusStore struct {
	cache *lrucache.LRUCache
	staging map[externalapi.DomainHash]externalapi.BlockStatus
	cache   *lrucache.LRUCache
}

// New instantiates a new BlockStatusStore
func New(cacheSize int, preallocate bool) model.BlockStatusStore {
	return &blockStatusStore{
		cache: lrucache.New(cacheSize, preallocate),
		staging: make(map[externalapi.DomainHash]externalapi.BlockStatus),
		cache:   lrucache.New(cacheSize, preallocate),
	}
}

// Stage stages the given blockStatus for the given blockHash
func (bss *blockStatusStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockStatus externalapi.BlockStatus) {
	stagingShard := bss.stagingShard(stagingArea)
	stagingShard.toAdd[*blockHash] = blockStatus.Clone()
func (bss *blockStatusStore) Stage(blockHash *externalapi.DomainHash, blockStatus externalapi.BlockStatus) {
	bss.staging[*blockHash] = blockStatus.Clone()
}

func (bss *blockStatusStore) IsStaged(stagingArea *model.StagingArea) bool {
	return bss.stagingShard(stagingArea).isStaged()
func (bss *blockStatusStore) IsStaged() bool {
	return len(bss.staging) != 0
}

func (bss *blockStatusStore) Discard() {
	bss.staging = make(map[externalapi.DomainHash]externalapi.BlockStatus)
}

func (bss *blockStatusStore) Commit(dbTx model.DBTransaction) error {
	for hash, status := range bss.staging {
		blockStatusBytes, err := bss.serializeBlockStatus(status)
		if err != nil {
			return err
		}
		err = dbTx.Put(bss.hashAsKey(&hash), blockStatusBytes)
		if err != nil {
			return err
		}
		bss.cache.Add(&hash, status)
	}

	bss.Discard()
	return nil
}

// Get gets the blockStatus associated with the given blockHash
func (bss *blockStatusStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.BlockStatus, error) {
	stagingShard := bss.stagingShard(stagingArea)

	if status, ok := stagingShard.toAdd[*blockHash]; ok {
func (bss *blockStatusStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (externalapi.BlockStatus, error) {
	if status, ok := bss.staging[*blockHash]; ok {
		return status, nil
	}

@@ -59,10 +79,8 @@ func (bss *blockStatusStore) Get(dbContext model.DBReader, stagingArea *model.St
}

// Exists returns true if the blockStatus for the given blockHash exists
func (bss *blockStatusStore) Exists(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
	stagingShard := bss.stagingShard(stagingArea)

	if _, ok := stagingShard.toAdd[*blockHash]; ok {
func (bss *blockStatusStore) Exists(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {
	if _, ok := bss.staging[*blockHash]; ok {
		return true, nil
	}

@@ -1,69 +0,0 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type blockStagingShard struct {
|
||||
store *blockStore
|
||||
toAdd map[externalapi.DomainHash]*externalapi.DomainBlock
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
}
|
||||
|
||||
func (bs *blockStore) stagingShard(stagingArea *model.StagingArea) *blockStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDBlock, func() model.StagingShard {
|
||||
return &blockStagingShard{
|
||||
store: bs,
|
||||
toAdd: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
}
|
||||
}).(*blockStagingShard)
|
||||
}
|
||||
|
||||
func (bss *blockStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, block := range bss.toAdd {
|
||||
blockBytes, err := bss.store.serializeBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(bss.store.hashAsKey(&hash), blockBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bss.store.cache.Add(&hash, block)
|
||||
}
|
||||
|
||||
for hash := range bss.toDelete {
|
||||
err := dbTx.Delete(bss.store.hashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bss.store.cache.Remove(&hash)
|
||||
}
|
||||
|
||||
err := bss.commitCount(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bss *blockStagingShard) commitCount(dbTx model.DBTransaction) error {
|
||||
count := bss.store.count(bss)
|
||||
countBytes, err := bss.store.serializeBlockCount(count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(countKey, countBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bss.store.countCached = count
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bss *blockStagingShard) isStaged() bool {
|
||||
return len(bss.toAdd) != 0 || len(bss.toDelete) != 0
|
||||
}
|
||||
@@ -15,14 +15,18 @@ var countKey = database.MakeBucket(nil).Key([]byte("blocks-count"))
|
||||
|
||||
// blockStore represents a store of blocks
|
||||
type blockStore struct {
|
||||
cache *lrucache.LRUCache
|
||||
countCached uint64
|
||||
staging map[externalapi.DomainHash]*externalapi.DomainBlock
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
cache *lrucache.LRUCache
|
||||
count uint64
|
||||
}
|
||||
|
||||
// New instantiates a new BlockStore
|
||||
func New(dbContext model.DBReader, cacheSize int, preallocate bool) (model.BlockStore, error) {
|
||||
blockStore := &blockStore{
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
staging: make(map[externalapi.DomainHash]*externalapi.DomainBlock),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
|
||||
err := blockStore.initializeCount(dbContext)
|
||||
@@ -49,29 +53,57 @@ func (bs *blockStore) initializeCount(dbContext model.DBReader) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
bs.countCached = count
|
||||
bs.count = count
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stage stages the given block for the given blockHash
|
||||
func (bs *blockStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) {
|
||||
stagingShard := bs.stagingShard(stagingArea)
|
||||
stagingShard.toAdd[*blockHash] = block.Clone()
|
||||
func (bs *blockStore) Stage(blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) {
|
||||
bs.staging[*blockHash] = block.Clone()
|
||||
}
|
||||
|
||||
func (bs *blockStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return bs.stagingShard(stagingArea).isStaged()
|
||||
func (bs *blockStore) IsStaged() bool {
|
||||
return len(bs.staging) != 0 || len(bs.toDelete) != 0
|
||||
}
|
||||
|
||||
func (bs *blockStore) Discard() {
|
||||
bs.staging = make(map[externalapi.DomainHash]*externalapi.DomainBlock)
|
||||
bs.toDelete = make(map[externalapi.DomainHash]struct{})
|
||||
}
|
||||
|
||||
func (bs *blockStore) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, block := range bs.staging {
|
||||
blockBytes, err := bs.serializeBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(bs.hashAsKey(&hash), blockBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bs.cache.Add(&hash, block)
|
||||
}
|
||||
|
||||
for hash := range bs.toDelete {
|
||||
err := dbTx.Delete(bs.hashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bs.cache.Remove(&hash)
|
||||
}
|
||||
|
||||
err := bs.commitCount(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bs.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Block gets the block associated with the given blockHash
|
||||
func (bs *blockStore) Block(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
|
||||
stagingShard := bs.stagingShard(stagingArea)
|
||||
|
||||
return bs.block(dbContext, stagingShard, blockHash)
|
||||
}
|
||||
|
||||
func (bs *blockStore) block(dbContext model.DBReader, stagingShard *blockStagingShard, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
|
||||
if block, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
func (bs *blockStore) Block(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) {
|
||||
if block, ok := bs.staging[*blockHash]; ok {
|
||||
return block.Clone(), nil
|
||||
}
|
||||
|
||||
@@ -93,10 +125,8 @@ func (bs *blockStore) block(dbContext model.DBReader, stagingShard *blockStaging
|
||||
}
|
||||
|
||||
// HasBlock returns whether a block with a given hash exists in the store.
|
||||
func (bs *blockStore) HasBlock(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
|
||||
stagingShard := bs.stagingShard(stagingArea)
|
||||
|
||||
if _, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
func (bs *blockStore) HasBlock(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {
|
||||
if _, ok := bs.staging[*blockHash]; ok {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -113,13 +143,11 @@ func (bs *blockStore) HasBlock(dbContext model.DBReader, stagingArea *model.Stag
|
||||
}
|
||||
|
||||
// Blocks gets the blocks associated with the given blockHashes
|
||||
func (bs *blockStore) Blocks(dbContext model.DBReader, stagingArea *model.StagingArea, blockHashes []*externalapi.DomainHash) ([]*externalapi.DomainBlock, error) {
|
||||
stagingShard := bs.stagingShard(stagingArea)
|
||||
|
||||
func (bs *blockStore) Blocks(dbContext model.DBReader, blockHashes []*externalapi.DomainHash) ([]*externalapi.DomainBlock, error) {
|
||||
blocks := make([]*externalapi.DomainBlock, len(blockHashes))
|
||||
for i, hash := range blockHashes {
|
||||
var err error
|
||||
blocks[i], err = bs.block(dbContext, stagingShard, hash)
|
||||
blocks[i], err = bs.Block(dbContext, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -128,14 +156,12 @@ func (bs *blockStore) Blocks(dbContext model.DBReader, stagingArea *model.Stagin
|
||||
}
|
||||
|
||||
// Delete deletes the block associated with the given blockHash
|
||||
func (bs *blockStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
|
||||
stagingShard := bs.stagingShard(stagingArea)
|
||||
|
||||
if _, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
delete(stagingShard.toAdd, *blockHash)
|
||||
func (bs *blockStore) Delete(blockHash *externalapi.DomainHash) {
|
||||
if _, ok := bs.staging[*blockHash]; ok {
|
||||
delete(bs.staging, *blockHash)
|
||||
return
|
||||
}
|
||||
stagingShard.toDelete[*blockHash] = struct{}{}
|
||||
bs.toDelete[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
func (bs *blockStore) serializeBlock(block *externalapi.DomainBlock) ([]byte, error) {
|
||||
@@ -156,13 +182,8 @@ func (bs *blockStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
return bucket.Key(hash.ByteSlice())
|
||||
}
|
||||
|
||||
func (bs *blockStore) Count(stagingArea *model.StagingArea) uint64 {
|
||||
stagingShard := bs.stagingShard(stagingArea)
|
||||
return bs.count(stagingShard)
|
||||
}
|
||||
|
||||
func (bs *blockStore) count(stagingShard *blockStagingShard) uint64 {
|
||||
return bs.countCached + uint64(len(stagingShard.toAdd)) - uint64(len(stagingShard.toDelete))
|
||||
func (bs *blockStore) Count() uint64 {
|
||||
return bs.count + uint64(len(bs.staging)) - uint64(len(bs.toDelete))
|
||||
}
|
||||
|
||||
func (bs *blockStore) deserializeBlockCount(countBytes []byte) (uint64, error) {
|
||||
@@ -174,6 +195,20 @@ func (bs *blockStore) deserializeBlockCount(countBytes []byte) (uint64, error) {
|
||||
return dbBlockCount.Count, nil
|
||||
}
|
||||
|
||||
func (bs *blockStore) commitCount(dbTx model.DBTransaction) error {
|
||||
count := bs.Count()
|
||||
countBytes, err := bs.serializeBlockCount(count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(countKey, countBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bs.count = count
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bs *blockStore) serializeBlockCount(count uint64) ([]byte, error) {
|
||||
dbBlockCount := &serialization.DbBlockCount{Count: count}
|
||||
return proto.Marshal(dbBlockCount)
|
||||
@@ -1,40 +0,0 @@
|
||||
package consensusstatestore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type consensusStateStagingShard struct {
	store                  *consensusStateStore
	tipsStaging            []*externalapi.DomainHash
	virtualUTXODiffStaging externalapi.UTXODiff
}

func (bs *consensusStateStore) stagingShard(stagingArea *model.StagingArea) *consensusStateStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDConsensusState, func() model.StagingShard {
		return &consensusStateStagingShard{
			store:                  bs,
			tipsStaging:            nil,
			virtualUTXODiffStaging: nil,
		}
	}).(*consensusStateStagingShard)
}

func (csss *consensusStateStagingShard) Commit(dbTx model.DBTransaction) error {
	err := csss.commitTips(dbTx)
	if err != nil {
		return err
	}

	err = csss.commitVirtualUTXODiff(dbTx)
	if err != nil {
		return err
	}

	return nil
}

func (csss *consensusStateStagingShard) isStaged() bool {
	return csss.tipsStaging != nil || csss.virtualUTXODiffStaging != nil
}
@@ -8,6 +8,9 @@ import (
|
||||
|
||||
// consensusStateStore represents a store for the current consensus state
|
||||
type consensusStateStore struct {
|
||||
tipsStaging []*externalapi.DomainHash
|
||||
virtualUTXODiffStaging externalapi.UTXODiff
|
||||
|
||||
virtualUTXOSetCache *utxolrucache.LRUCache
|
||||
|
||||
tipsCache []*externalapi.DomainHash
|
||||
@@ -20,6 +23,28 @@ func New(utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore {
|
||||
}
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return css.stagingShard(stagingArea).isStaged()
|
||||
func (css *consensusStateStore) Discard() {
|
||||
css.tipsStaging = nil
|
||||
css.virtualUTXODiffStaging = nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) Commit(dbTx model.DBTransaction) error {
|
||||
err := css.commitTips(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = css.commitVirtualUTXODiff(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
css.Discard()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) IsStaged() bool {
|
||||
return css.tipsStaging != nil ||
|
||||
css.virtualUTXODiffStaging != nil
|
||||
}
|
||||
@@ -10,11 +10,9 @@ import (
|
||||
|
||||
var tipsKey = database.MakeBucket(nil).Key([]byte("tips"))
|
||||
|
||||
func (css *consensusStateStore) Tips(stagingArea *model.StagingArea, dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
|
||||
stagingShard := css.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.tipsStaging != nil {
|
||||
return externalapi.CloneHashes(stagingShard.tipsStaging), nil
|
||||
func (css *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
|
||||
if css.tipsStaging != nil {
|
||||
return externalapi.CloneHashes(css.tipsStaging), nil
|
||||
}
|
||||
|
||||
if css.tipsCache != nil {
|
||||
@@ -34,10 +32,28 @@ func (css *consensusStateStore) Tips(stagingArea *model.StagingArea, dbContext m
|
||||
return externalapi.CloneHashes(tips), nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) StageTips(stagingArea *model.StagingArea, tipHashes []*externalapi.DomainHash) {
|
||||
stagingShard := css.stagingShard(stagingArea)
|
||||
func (css *consensusStateStore) StageTips(tipHashes []*externalapi.DomainHash) {
|
||||
css.tipsStaging = externalapi.CloneHashes(tipHashes)
|
||||
}
|
||||
|
||||
stagingShard.tipsStaging = externalapi.CloneHashes(tipHashes)
|
||||
func (css *consensusStateStore) commitTips(dbTx model.DBTransaction) error {
|
||||
if css.tipsStaging == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tipsBytes, err := css.serializeTips(css.tipsStaging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(tipsKey, tipsBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
css.tipsCache = css.tipsStaging
|
||||
|
||||
// Note: we don't discard the staging here since that's
|
||||
// being done at the end of Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) {
|
||||
@@ -56,21 +72,3 @@ func (css *consensusStateStore) deserializeTips(tipsBytes []byte) ([]*externalap
|
||||
|
||||
return serialization.DBTipsToTips(dbTips)
|
||||
}
|
||||
|
||||
func (csss *consensusStateStagingShard) commitTips(dbTx model.DBTransaction) error {
|
||||
if csss.tipsStaging == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tipsBytes, err := csss.store.serializeTips(csss.tipsStaging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(tipsKey, tipsBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
csss.store.tipsCache = csss.tipsStaging
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -19,14 +19,12 @@ func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
|
||||
return utxoSetBucket.Key(serializedOutpoint), nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) StageVirtualUTXODiff(stagingArea *model.StagingArea, virtualUTXODiff externalapi.UTXODiff) {
|
||||
stagingShard := css.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.virtualUTXODiffStaging = virtualUTXODiff
|
||||
func (css *consensusStateStore) StageVirtualUTXODiff(virtualUTXODiff externalapi.UTXODiff) {
|
||||
css.virtualUTXODiffStaging = virtualUTXODiff
|
||||
}
|
||||
|
||||
func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTransaction) error {
|
||||
hadStartedImportingPruningPointUTXOSet, err := csss.store.HadStartedImportingPruningPointUTXOSet(dbTx)
|
||||
func (css *consensusStateStore) commitVirtualUTXODiff(dbTx model.DBTransaction) error {
|
||||
hadStartedImportingPruningPointUTXOSet, err := css.HadStartedImportingPruningPointUTXOSet(dbTx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -34,11 +32,11 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
|
||||
return errors.New("cannot commit virtual UTXO diff after starting to import the pruning point UTXO set")
|
||||
}
|
||||
|
||||
if csss.virtualUTXODiffStaging == nil {
|
||||
if css.virtualUTXODiffStaging == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
toRemoveIterator := csss.virtualUTXODiffStaging.ToRemove().Iterator()
|
||||
toRemoveIterator := css.virtualUTXODiffStaging.ToRemove().Iterator()
|
||||
defer toRemoveIterator.Close()
|
||||
for ok := toRemoveIterator.First(); ok; ok = toRemoveIterator.Next() {
|
||||
toRemoveOutpoint, _, err := toRemoveIterator.Get()
|
||||
@@ -46,7 +44,7 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
|
||||
return err
|
||||
}
|
||||
|
||||
csss.store.virtualUTXOSetCache.Remove(toRemoveOutpoint)
|
||||
css.virtualUTXOSetCache.Remove(toRemoveOutpoint)
|
||||
|
||||
dbKey, err := utxoKey(toRemoveOutpoint)
|
||||
if err != nil {
|
||||
@@ -58,7 +56,7 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
|
||||
}
|
||||
}
|
||||
|
||||
toAddIterator := csss.virtualUTXODiffStaging.ToAdd().Iterator()
|
||||
toAddIterator := css.virtualUTXODiffStaging.ToAdd().Iterator()
|
||||
defer toAddIterator.Close()
|
||||
for ok := toAddIterator.First(); ok; ok = toAddIterator.Next() {
|
||||
toAddOutpoint, toAddEntry, err := toAddIterator.Get()
|
||||
@@ -66,7 +64,7 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
|
||||
return err
|
||||
}
|
||||
|
||||
csss.store.virtualUTXOSetCache.Add(toAddOutpoint, toAddEntry)
|
||||
css.virtualUTXOSetCache.Add(toAddOutpoint, toAddEntry)
|
||||
|
||||
dbKey, err := utxoKey(toAddOutpoint)
|
||||
if err != nil {
|
||||
@@ -87,22 +85,21 @@ func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTrans
|
||||
return nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, stagingArea *model.StagingArea,
|
||||
outpoint *externalapi.DomainOutpoint) (externalapi.UTXOEntry, error) {
|
||||
func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (
|
||||
externalapi.UTXOEntry, error) {
|
||||
|
||||
stagingShard := css.stagingShard(stagingArea)
|
||||
|
||||
return css.utxoByOutpointFromStagedVirtualUTXODiff(dbContext, stagingShard, outpoint)
|
||||
return css.utxoByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
|
||||
stagingShard *consensusStateStagingShard, outpoint *externalapi.DomainOutpoint) (externalapi.UTXOEntry, error) {
|
||||
outpoint *externalapi.DomainOutpoint) (
|
||||
externalapi.UTXOEntry, error) {
|
||||
|
||||
if stagingShard.virtualUTXODiffStaging != nil {
|
||||
if stagingShard.virtualUTXODiffStaging.ToRemove().Contains(outpoint) {
|
||||
if css.virtualUTXODiffStaging != nil {
|
||||
if css.virtualUTXODiffStaging.ToRemove().Contains(outpoint) {
|
||||
return nil, errors.Errorf("outpoint was not found")
|
||||
}
|
||||
if utxoEntry, ok := stagingShard.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok {
|
||||
if utxoEntry, ok := css.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok {
|
||||
return utxoEntry, nil
|
||||
}
|
||||
}
|
||||
@@ -130,22 +127,18 @@ func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContex
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, stagingArea *model.StagingArea,
|
||||
outpoint *externalapi.DomainOutpoint) (bool, error) {
|
||||
|
||||
stagingShard := css.stagingShard(stagingArea)
|
||||
|
||||
return css.hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext, stagingShard, outpoint)
|
||||
func (css *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, outpoint *externalapi.DomainOutpoint) (bool, error) {
|
||||
return css.hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext, outpoint)
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader,
|
||||
stagingShard *consensusStateStagingShard, outpoint *externalapi.DomainOutpoint) (bool, error) {
|
||||
outpoint *externalapi.DomainOutpoint) (bool, error) {
|
||||
|
||||
if stagingShard.virtualUTXODiffStaging != nil {
|
||||
if stagingShard.virtualUTXODiffStaging.ToRemove().Contains(outpoint) {
|
||||
if css.virtualUTXODiffStaging != nil {
|
||||
if css.virtualUTXODiffStaging.ToRemove().Contains(outpoint) {
|
||||
return false, nil
|
||||
}
|
||||
if _, ok := stagingShard.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok {
|
||||
if _, ok := css.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
@@ -158,8 +151,8 @@ func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbCon
|
||||
return dbContext.Has(key)
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader, fromOutpoint *externalapi.DomainOutpoint, limit int) (
|
||||
[]*externalapi.OutpointAndUTXOEntryPair, error) {
|
||||
func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader,
|
||||
fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
|
||||
|
||||
cursor, err := dbContext.Cursor(utxoSetBucket)
|
||||
if err != nil {
|
||||
@@ -196,19 +189,15 @@ func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader, fromOutpo
|
||||
return outpointAndUTXOEntryPairs, nil
|
||||
}
|
||||
|
||||
func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader, stagingArea *model.StagingArea) (
|
||||
externalapi.ReadOnlyUTXOSetIterator, error) {
|
||||
|
||||
stagingShard := css.stagingShard(stagingArea)
|
||||
|
||||
func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) {
|
||||
cursor, err := dbContext.Cursor(utxoSetBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mainIterator := newCursorUTXOSetIterator(cursor)
|
||||
if stagingShard.virtualUTXODiffStaging != nil {
|
||||
return utxo.IteratorWithDiff(mainIterator, stagingShard.virtualUTXODiffStaging)
|
||||
if css.virtualUTXODiffStaging != nil {
|
||||
return utxo.IteratorWithDiff(mainIterator, css.virtualUTXODiffStaging)
|
||||
}
|
||||
|
||||
return mainIterator, nil
|
||||
|
||||
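The virtual-UTXO hunks above apply a staged `UTXODiff` in two passes: everything in `ToRemove()` is deleted from the on-disk set (and evicted from the LRU cache), then everything in `ToAdd()` is written (and cached). A minimal sketch of that apply step over plain maps, with hypothetical string keys and values standing in for the real `DomainOutpoint` and `UTXOEntry` types:

```go
package main

import "fmt"

// applyDiff mirrors the remove-then-add order used by commitVirtualUTXODiff.
func applyDiff(utxoSet map[string]string, toRemove map[string]struct{}, toAdd map[string]string) {
	for outpoint := range toRemove {
		delete(utxoSet, outpoint) // also the point where the LRU cache entry is evicted
	}
	for outpoint, entry := range toAdd {
		utxoSet[outpoint] = entry // also the point where the LRU cache is refreshed
	}
}

func main() {
	set := map[string]string{"tx0:0": "50 KAS"}
	applyDiff(set,
		map[string]struct{}{"tx0:0": {}},
		map[string]string{"tx1:0": "30 KAS"})
	fmt.Println(set) // map[tx1:0:30 KAS]
}
```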
@@ -24,6 +24,10 @@ func (css *consensusStateStore) FinishImportingPruningPointUTXOSet(dbContext mod
|
||||
func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbContext model.DBWriter,
|
||||
pruningPointUTXOSetIterator externalapi.ReadOnlyUTXOSetIterator) error {
|
||||
|
||||
if css.virtualUTXODiffStaging != nil {
|
||||
return errors.New("cannot import virtual UTXO set while virtual UTXO diff is staged")
|
||||
}
|
||||
|
||||
hadStartedImportingPruningPointUTXOSet, err := css.HadStartedImportingPruningPointUTXOSet(dbContext)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
package daablocksstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type daaBlocksStagingShard struct {
|
||||
store *daaBlocksStore
|
||||
daaScoreToAdd map[externalapi.DomainHash]uint64
|
||||
daaAddedBlocksToAdd map[externalapi.DomainHash][]*externalapi.DomainHash
|
||||
daaScoreToDelete map[externalapi.DomainHash]struct{}
|
||||
daaAddedBlocksToDelete map[externalapi.DomainHash]struct{}
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) stagingShard(stagingArea *model.StagingArea) *daaBlocksStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDDAABlocks, func() model.StagingShard {
|
||||
return &daaBlocksStagingShard{
|
||||
store: daas,
|
||||
daaScoreToAdd: make(map[externalapi.DomainHash]uint64),
|
||||
daaAddedBlocksToAdd: make(map[externalapi.DomainHash][]*externalapi.DomainHash),
|
||||
daaScoreToDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
daaAddedBlocksToDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
}
|
||||
}).(*daaBlocksStagingShard)
|
||||
}
|
||||
|
||||
func (daass *daaBlocksStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, daaScore := range daass.daaScoreToAdd {
|
||||
daaScoreBytes := binaryserialization.SerializeUint64(daaScore)
|
||||
err := dbTx.Put(daass.store.daaScoreHashAsKey(&hash), daaScoreBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daass.store.daaScoreLRUCache.Add(&hash, daaScore)
|
||||
}
|
||||
|
||||
for hash, addedBlocks := range daass.daaAddedBlocksToAdd {
|
||||
addedBlocksBytes := binaryserialization.SerializeHashes(addedBlocks)
|
||||
err := dbTx.Put(daass.store.daaAddedBlocksHashAsKey(&hash), addedBlocksBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daass.store.daaAddedBlocksLRUCache.Add(&hash, addedBlocks)
|
||||
}
|
||||
|
||||
for hash := range daass.daaScoreToDelete {
|
||||
err := dbTx.Delete(daass.store.daaScoreHashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daass.store.daaScoreLRUCache.Remove(&hash)
|
||||
}
|
||||
|
||||
for hash := range daass.daaAddedBlocksToDelete {
|
||||
err := dbTx.Delete(daass.store.daaAddedBlocksHashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daass.store.daaAddedBlocksLRUCache.Remove(&hash)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (daass *daaBlocksStagingShard) isStaged() bool {
|
||||
return len(daass.daaScoreToAdd) != 0 ||
|
||||
len(daass.daaAddedBlocksToAdd) != 0 ||
|
||||
len(daass.daaScoreToDelete) != 0 ||
|
||||
len(daass.daaAddedBlocksToDelete) != 0
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
package daablocksstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
|
||||
)
|
||||
|
||||
var daaScoreBucket = database.MakeBucket([]byte("daa-score"))
|
||||
var daaAddedBlocksBucket = database.MakeBucket([]byte("daa-added-blocks"))
|
||||
|
||||
// daaBlocksStore represents a store of DAABlocksStore
|
||||
type daaBlocksStore struct {
|
||||
daaScoreLRUCache *lrucache.LRUCache
|
||||
daaAddedBlocksLRUCache *lrucache.LRUCache
|
||||
}
|
||||
|
||||
// New instantiates a new DAABlocksStore
|
||||
func New(daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore {
|
||||
return &daaBlocksStore{
|
||||
daaScoreLRUCache: lrucache.New(daaScoreCacheSize, preallocate),
|
||||
daaAddedBlocksLRUCache: lrucache.New(daaAddedBlocksCacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) StageDAAScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, daaScore uint64) {
|
||||
stagingShard := daas.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.daaScoreToAdd[*blockHash] = daaScore
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) StageBlockDAAAddedBlocks(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, addedBlocks []*externalapi.DomainHash) {
|
||||
stagingShard := daas.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.daaAddedBlocksToAdd[*blockHash] = externalapi.CloneHashes(addedBlocks)
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return daas.stagingShard(stagingArea).isStaged()
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) DAAScore(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (uint64, error) {
|
||||
stagingShard := daas.stagingShard(stagingArea)
|
||||
|
||||
if daaScore, ok := stagingShard.daaScoreToAdd[*blockHash]; ok {
|
||||
return daaScore, nil
|
||||
}
|
||||
|
||||
if daaScore, ok := daas.daaScoreLRUCache.Get(blockHash); ok {
|
||||
return daaScore.(uint64), nil
|
||||
}
|
||||
|
||||
daaScoreBytes, err := dbContext.Get(daas.daaScoreHashAsKey(blockHash))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
daaScore, err := binaryserialization.DeserializeUint64(daaScoreBytes)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
daas.daaScoreLRUCache.Add(blockHash, daaScore)
|
||||
return daaScore, nil
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) DAAAddedBlocks(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
|
||||
stagingShard := daas.stagingShard(stagingArea)
|
||||
|
||||
if addedBlocks, ok := stagingShard.daaAddedBlocksToAdd[*blockHash]; ok {
|
||||
return externalapi.CloneHashes(addedBlocks), nil
|
||||
}
|
||||
|
||||
if addedBlocks, ok := daas.daaAddedBlocksLRUCache.Get(blockHash); ok {
|
||||
return externalapi.CloneHashes(addedBlocks.([]*externalapi.DomainHash)), nil
|
||||
}
|
||||
|
||||
addedBlocksBytes, err := dbContext.Get(daas.daaAddedBlocksHashAsKey(blockHash))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addedBlocks, err := binaryserialization.DeserializeHashes(addedBlocksBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
daas.daaAddedBlocksLRUCache.Add(blockHash, addedBlocks)
|
||||
return externalapi.CloneHashes(addedBlocks), nil
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) daaScoreHashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
return daaScoreBucket.Key(hash.ByteSlice())
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) daaAddedBlocksHashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
return daaAddedBlocksBucket.Key(hash.ByteSlice())
|
||||
}
|
||||
|
||||
func (daas *daaBlocksStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
|
||||
stagingShard := daas.stagingShard(stagingArea)
|
||||
|
||||
if _, ok := stagingShard.daaScoreToAdd[*blockHash]; ok {
|
||||
delete(stagingShard.daaScoreToAdd, *blockHash)
|
||||
} else {
|
||||
stagingShard.daaScoreToDelete[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
if _, ok := stagingShard.daaAddedBlocksToAdd[*blockHash]; ok {
|
||||
delete(stagingShard.daaAddedBlocksToAdd, *blockHash)
|
||||
} else {
|
||||
stagingShard.daaAddedBlocksToDelete[*blockHash] = struct{}{}
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
package finalitystore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type finalityStagingShard struct {
	store *finalityStore
	toAdd map[externalapi.DomainHash]*externalapi.DomainHash
}

func (fs *finalityStore) stagingShard(stagingArea *model.StagingArea) *finalityStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDFinality, func() model.StagingShard {
		return &finalityStagingShard{
			store: fs,
			toAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash),
		}
	}).(*finalityStagingShard)
}

func (fss *finalityStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, finalityPointHash := range fss.toAdd {
		err := dbTx.Put(fss.store.hashAsKey(&hash), finalityPointHash.ByteSlice())
		if err != nil {
			return err
		}
		fss.store.cache.Add(&hash, finalityPointHash)
	}

	return nil
}

func (fss *finalityStagingShard) isStaged() bool {
	// isStaged should report true only when there is something to commit.
	return len(fss.toAdd) != 0
}
@@ -10,26 +10,27 @@ import (
|
||||
var bucket = database.MakeBucket([]byte("finality-points"))
|
||||
|
||||
type finalityStore struct {
|
||||
cache *lrucache.LRUCache
|
||||
staging map[externalapi.DomainHash]*externalapi.DomainHash
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
cache *lrucache.LRUCache
|
||||
}
|
||||
|
||||
// New instantiates a new FinalityStore
|
||||
func New(cacheSize int, preallocate bool) model.FinalityStore {
|
||||
return &finalityStore{
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
staging: make(map[externalapi.DomainHash]*externalapi.DomainHash),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *finalityStore) StageFinalityPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, finalityPointHash *externalapi.DomainHash) {
|
||||
stagingShard := fs.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.toAdd[*blockHash] = finalityPointHash
|
||||
func (fs *finalityStore) StageFinalityPoint(blockHash *externalapi.DomainHash, finalityPointHash *externalapi.DomainHash) {
|
||||
fs.staging[*blockHash] = finalityPointHash
|
||||
}
|
||||
|
||||
func (fs *finalityStore) FinalityPoint(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
|
||||
stagingShard := fs.stagingShard(stagingArea)
|
||||
|
||||
if finalityPointHash, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
func (fs *finalityStore) FinalityPoint(
|
||||
dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
|
||||
if finalityPointHash, ok := fs.staging[*blockHash]; ok {
|
||||
return finalityPointHash, nil
|
||||
}
|
||||
|
||||
@@ -50,8 +51,25 @@ func (fs *finalityStore) FinalityPoint(dbContext model.DBReader, stagingArea *mo
|
||||
return finalityPointHash, nil
|
||||
}
|
||||
|
||||
func (fs *finalityStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return fs.stagingShard(stagingArea).isStaged()
|
||||
func (fs *finalityStore) Discard() {
|
||||
fs.staging = make(map[externalapi.DomainHash]*externalapi.DomainHash)
|
||||
}
|
||||
|
||||
func (fs *finalityStore) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, finalityPointHash := range fs.staging {
|
||||
err := dbTx.Put(fs.hashAsKey(&hash), finalityPointHash.ByteSlice())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.cache.Add(&hash, finalityPointHash)
|
||||
}
|
||||
|
||||
fs.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *finalityStore) IsStaged() bool {
|
||||
return len(fs.staging) != 0
|
||||
}
|
||||
|
||||
func (fs *finalityStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
package ghostdagdatastore

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

type ghostdagDataStagingShard struct {
	store *ghostdagDataStore
	toAdd map[externalapi.DomainHash]*model.BlockGHOSTDAGData
}

func (gds *ghostdagDataStore) stagingShard(stagingArea *model.StagingArea) *ghostdagDataStagingShard {
	return stagingArea.GetOrCreateShard(model.StagingShardIDGHOSTDAG, func() model.StagingShard {
		return &ghostdagDataStagingShard{
			store: gds,
			toAdd: make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData),
		}
	}).(*ghostdagDataStagingShard)
}

func (gdss *ghostdagDataStagingShard) Commit(dbTx model.DBTransaction) error {
	for hash, blockGHOSTDAGData := range gdss.toAdd {
		blockGhostdagDataBytes, err := gdss.store.serializeBlockGHOSTDAGData(blockGHOSTDAGData)
		if err != nil {
			return err
		}
		err = dbTx.Put(gdss.store.hashAsKey(&hash), blockGhostdagDataBytes)
		if err != nil {
			return err
		}
		gdss.store.cache.Add(&hash, blockGHOSTDAGData)
	}

	return nil
}

func (gdss *ghostdagDataStagingShard) isStaged() bool {
	return len(gdss.toAdd) != 0
}
@@ -13,32 +13,51 @@ var bucket = database.MakeBucket([]byte("block-ghostdag-data"))
|
||||
|
||||
// ghostdagDataStore represents a store of BlockGHOSTDAGData
|
||||
type ghostdagDataStore struct {
|
||||
cache *lrucache.LRUCache
|
||||
staging map[externalapi.DomainHash]*model.BlockGHOSTDAGData
|
||||
cache *lrucache.LRUCache
|
||||
}
|
||||
|
||||
// New instantiates a new GHOSTDAGDataStore
|
||||
func New(cacheSize int, preallocate bool) model.GHOSTDAGDataStore {
|
||||
return &ghostdagDataStore{
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
staging: make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData),
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
// Stage stages the given blockGHOSTDAGData for the given blockHash
|
||||
func (gds *ghostdagDataStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) {
|
||||
stagingShard := gds.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.toAdd[*blockHash] = blockGHOSTDAGData
|
||||
func (gds *ghostdagDataStore) Stage(blockHash *externalapi.DomainHash, blockGHOSTDAGData *model.BlockGHOSTDAGData) {
|
||||
gds.staging[*blockHash] = blockGHOSTDAGData
|
||||
}
|
||||
|
||||
func (gds *ghostdagDataStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return gds.stagingShard(stagingArea).isStaged()
|
||||
func (gds *ghostdagDataStore) IsStaged() bool {
|
||||
return len(gds.staging) != 0
|
||||
}
|
||||
|
||||
func (gds *ghostdagDataStore) Discard() {
|
||||
gds.staging = make(map[externalapi.DomainHash]*model.BlockGHOSTDAGData)
|
||||
}
|
||||
|
||||
func (gds *ghostdagDataStore) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, blockGHOSTDAGData := range gds.staging {
|
||||
blockGhostdagDataBytes, err := gds.serializeBlockGHOSTDAGData(blockGHOSTDAGData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(gds.hashAsKey(&hash), blockGhostdagDataBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gds.cache.Add(&hash, blockGHOSTDAGData)
|
||||
}
|
||||
|
||||
gds.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets the blockGHOSTDAGData associated with the given blockHash
|
||||
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*model.BlockGHOSTDAGData, error) {
|
||||
stagingShard := gds.stagingShard(stagingArea)
|
||||
|
||||
if blockGHOSTDAGData, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
func (gds *ghostdagDataStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*model.BlockGHOSTDAGData, error) {
|
||||
if blockGHOSTDAGData, ok := gds.staging[*blockHash]; ok {
|
||||
return blockGHOSTDAGData, nil
|
||||
}
|
||||
|
||||
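Both versions of `Get` shown in this hunk follow the same three-level read path that recurs throughout these stores: staged data first, then the LRU cache, then the database, with the database result written back into the cache. A compact sketch of that lookup order; the `staging`, `cache`, and `db` maps below are illustrative stand-ins, not the real `lrucache` or `DBReader` types.

```go
package main

import (
	"errors"
	"fmt"
)

type store struct {
	staging map[string]string
	cache   map[string]string
	db      map[string]string
}

// get checks staged values, then the cache, then the backing database.
func (s *store) get(hash string) (string, error) {
	if v, ok := s.staging[hash]; ok {
		return v, nil
	}
	if v, ok := s.cache[hash]; ok {
		return v, nil
	}
	v, ok := s.db[hash]
	if !ok {
		return "", errors.New("not found")
	}
	s.cache[hash] = v // populate the cache on a database hit
	return v, nil
}

func main() {
	s := &store{
		staging: map[string]string{},
		cache:   map[string]string{},
		db:      map[string]string{"h1": "ghostdag-data"},
	}
	fmt.Println(s.get("h1"))
}
```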
@@ -1,87 +0,0 @@
|
||||
package headersselectedchainstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type headersSelectedChainStagingShard struct {
|
||||
store *headersSelectedChainStore
|
||||
addedByHash map[externalapi.DomainHash]uint64
|
||||
removedByHash map[externalapi.DomainHash]struct{}
|
||||
addedByIndex map[uint64]*externalapi.DomainHash
|
||||
removedByIndex map[uint64]struct{}
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedChainStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedChain, func() model.StagingShard {
|
||||
return &headersSelectedChainStagingShard{
|
||||
store: hscs,
|
||||
addedByHash: make(map[externalapi.DomainHash]uint64),
|
||||
removedByHash: make(map[externalapi.DomainHash]struct{}),
|
||||
addedByIndex: make(map[uint64]*externalapi.DomainHash),
|
||||
removedByIndex: make(map[uint64]struct{}),
|
||||
}
|
||||
}).(*headersSelectedChainStagingShard)
|
||||
}
|
||||
|
||||
func (hscss *headersSelectedChainStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
if !hscss.isStaged() {
|
||||
return nil
|
||||
}
|
||||
|
||||
for hash := range hscss.removedByHash {
|
||||
hashCopy := hash
|
||||
err := dbTx.Delete(hscss.store.hashAsKey(&hashCopy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hscss.store.cacheByHash.Remove(&hashCopy)
|
||||
}
|
||||
|
||||
for index := range hscss.removedByIndex {
|
||||
err := dbTx.Delete(hscss.store.indexAsKey(index))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hscss.store.cacheByIndex.Remove(index)
|
||||
}
|
||||
|
||||
highestIndex := uint64(0)
|
||||
for hash, index := range hscss.addedByHash {
|
||||
hashCopy := hash
|
||||
err := dbTx.Put(hscss.store.hashAsKey(&hashCopy), hscss.store.serializeIndex(index))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Put(hscss.store.indexAsKey(index), binaryserialization.SerializeHash(&hashCopy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hscss.store.cacheByHash.Add(&hashCopy, index)
|
||||
hscss.store.cacheByIndex.Add(index, &hashCopy)
|
||||
|
||||
if index > highestIndex {
|
||||
highestIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
err := dbTx.Put(highestChainBlockIndexKey, hscss.store.serializeIndex(highestIndex))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hscss.store.cacheHighestChainBlockIndex = highestIndex
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hscss *headersSelectedChainStagingShard) isStaged() bool {
|
||||
return len(hscss.addedByHash) != 0 ||
|
||||
len(hscss.removedByHash) != 0 ||
|
||||
len(hscss.addedByIndex) != 0 ||
|
||||
len(hscss.removedByIndex) != 0
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package headersselectedchainstore
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
@@ -17,6 +16,10 @@ var bucketChainBlockIndexByHash = database.MakeBucket([]byte("chain-block-index-
|
||||
var highestChainBlockIndexKey = database.MakeBucket(nil).Key([]byte("highest-chain-block-index"))
|
||||
|
||||
type headersSelectedChainStore struct {
|
||||
stagingAddedByHash map[externalapi.DomainHash]uint64
|
||||
stagingRemovedByHash map[externalapi.DomainHash]struct{}
|
||||
stagingAddedByIndex map[uint64]*externalapi.DomainHash
|
||||
stagingRemovedByIndex map[uint64]struct{}
|
||||
cacheByIndex *lrucacheuint64tohash.LRUCache
|
||||
cacheByHash *lrucache.LRUCache
|
||||
cacheHighestChainBlockIndex uint64
|
||||
@@ -25,27 +28,31 @@ type headersSelectedChainStore struct {
|
||||
// New instantiates a new HeadersSelectedChainStore
|
||||
func New(cacheSize int, preallocate bool) model.HeadersSelectedChainStore {
|
||||
return &headersSelectedChainStore{
|
||||
cacheByIndex: lrucacheuint64tohash.New(cacheSize, preallocate),
|
||||
cacheByHash: lrucache.New(cacheSize, preallocate),
|
||||
stagingAddedByHash: make(map[externalapi.DomainHash]uint64),
|
||||
stagingRemovedByHash: make(map[externalapi.DomainHash]struct{}),
|
||||
stagingAddedByIndex: make(map[uint64]*externalapi.DomainHash),
|
||||
stagingRemovedByIndex: make(map[uint64]struct{}),
|
||||
cacheByIndex: lrucacheuint64tohash.New(cacheSize, preallocate),
|
||||
cacheByHash: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
// Stage stages the given chain changes
|
||||
func (hscs *headersSelectedChainStore) Stage(dbContext model.DBReader, stagingArea *model.StagingArea, chainChanges *externalapi.SelectedChainPath) error {
|
||||
stagingShard := hscs.stagingShard(stagingArea)
|
||||
func (hscs *headersSelectedChainStore) Stage(dbContext model.DBReader,
|
||||
chainChanges *externalapi.SelectedChainPath) error {
|
||||
|
||||
if hscs.IsStaged(stagingArea) {
|
||||
if hscs.IsStaged() {
|
||||
return errors.Errorf("can't stage when there's already staged data")
|
||||
}
|
||||
|
||||
for _, blockHash := range chainChanges.Removed {
|
||||
index, err := hscs.GetIndexByHash(dbContext, stagingArea, blockHash)
|
||||
index, err := hscs.GetIndexByHash(dbContext, blockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stagingShard.removedByIndex[index] = struct{}{}
|
||||
stagingShard.removedByHash[*blockHash] = struct{}{}
|
||||
hscs.stagingRemovedByIndex[index] = struct{}{}
|
||||
hscs.stagingRemovedByHash[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
currentIndex := uint64(0)
|
||||
@@ -59,27 +66,89 @@ func (hscs *headersSelectedChainStore) Stage(dbContext model.DBReader, stagingAr
|
||||
}
|
||||
|
||||
for _, blockHash := range chainChanges.Added {
|
||||
stagingShard.addedByIndex[currentIndex] = blockHash
|
||||
stagingShard.addedByHash[*blockHash] = currentIndex
|
||||
hscs.stagingAddedByIndex[currentIndex] = blockHash
|
||||
hscs.stagingAddedByHash[*blockHash] = currentIndex
|
||||
currentIndex++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return hscs.stagingShard(stagingArea).isStaged()
|
||||
func (hscs *headersSelectedChainStore) IsStaged() bool {
|
||||
return len(hscs.stagingAddedByHash) != 0 ||
|
||||
len(hscs.stagingRemovedByHash) != 0 ||
|
||||
len(hscs.stagingAddedByIndex) != 0 ||
|
||||
len(hscs.stagingRemovedByIndex) != 0
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) Discard() {
|
||||
hscs.stagingAddedByHash = make(map[externalapi.DomainHash]uint64)
|
||||
hscs.stagingRemovedByHash = make(map[externalapi.DomainHash]struct{})
|
||||
hscs.stagingAddedByIndex = make(map[uint64]*externalapi.DomainHash)
|
||||
hscs.stagingRemovedByIndex = make(map[uint64]struct{})
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) Commit(dbTx model.DBTransaction) error {
|
||||
if !hscs.IsStaged() {
|
||||
return nil
|
||||
}
|
||||
|
||||
for hash := range hscs.stagingRemovedByHash {
|
||||
hashCopy := hash
|
||||
err := dbTx.Delete(hscs.hashAsKey(&hashCopy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hscs.cacheByHash.Remove(&hashCopy)
|
||||
}
|
||||
|
||||
for index := range hscs.stagingRemovedByIndex {
|
||||
err := dbTx.Delete(hscs.indexAsKey(index))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hscs.cacheByIndex.Remove(index)
|
||||
}
|
||||
|
||||
highestIndex := uint64(0)
|
||||
for hash, index := range hscs.stagingAddedByHash {
|
||||
hashCopy := hash
|
||||
err := dbTx.Put(hscs.hashAsKey(&hashCopy), hscs.serializeIndex(index))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dbTx.Put(hscs.indexAsKey(index), binaryserialization.SerializeHash(&hashCopy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hscs.cacheByHash.Add(&hashCopy, index)
|
||||
hscs.cacheByIndex.Add(index, &hashCopy)
|
||||
|
||||
if index > highestIndex {
|
||||
highestIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
err := dbTx.Put(highestChainBlockIndexKey, hscs.serializeIndex(highestIndex))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hscs.cacheHighestChainBlockIndex = highestIndex
|
||||
|
||||
hscs.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets the chain block index for the given blockHash
|
||||
func (hscs *headersSelectedChainStore) GetIndexByHash(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (uint64, error) {
|
||||
stagingShard := hscs.stagingShard(stagingArea)
|
||||
|
||||
if index, ok := stagingShard.addedByHash[*blockHash]; ok {
|
||||
func (hscs *headersSelectedChainStore) GetIndexByHash(dbContext model.DBReader, blockHash *externalapi.DomainHash) (uint64, error) {
|
||||
if index, ok := hscs.stagingAddedByHash[*blockHash]; ok {
|
||||
return index, nil
|
||||
}
|
||||
|
||||
if _, ok := stagingShard.removedByHash[*blockHash]; ok {
|
||||
if _, ok := hscs.stagingRemovedByHash[*blockHash]; ok {
|
||||
return 0, errors.Wrapf(database.ErrNotFound, "couldn't find block %s", blockHash)
|
||||
}
|
||||
|
||||
@@ -92,23 +161,17 @@ func (hscs *headersSelectedChainStore) GetIndexByHash(dbContext model.DBReader,
|
||||
return 0, err
|
||||
}
|
||||
|
||||
index, err := hscs.deserializeIndex(indexBytes)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
index := hscs.deserializeIndex(indexBytes)
|
||||
hscs.cacheByHash.Add(blockHash, index)
|
||||
return index, nil
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) GetHashByIndex(dbContext model.DBReader, stagingArea *model.StagingArea, index uint64) (*externalapi.DomainHash, error) {
|
||||
stagingShard := hscs.stagingShard(stagingArea)
|
||||
|
||||
if blockHash, ok := stagingShard.addedByIndex[index]; ok {
|
||||
func (hscs *headersSelectedChainStore) GetHashByIndex(dbContext model.DBReader, index uint64) (*externalapi.DomainHash, error) {
|
||||
if blockHash, ok := hscs.stagingAddedByIndex[index]; ok {
|
||||
return blockHash, nil
|
||||
}
|
||||
|
||||
if _, ok := stagingShard.removedByIndex[index]; ok {
|
||||
if _, ok := hscs.stagingRemovedByIndex[index]; ok {
|
||||
return nil, errors.Wrapf(database.ErrNotFound, "couldn't find chain block with index %d", index)
|
||||
}
|
||||
|
||||
@@ -130,11 +193,11 @@ func (hscs *headersSelectedChainStore) GetHashByIndex(dbContext model.DBReader,
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) serializeIndex(index uint64) []byte {
|
||||
return binaryserialization.SerializeUint64(index)
|
||||
return binaryserialization.SerializeChainBlockIndex(index)
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) deserializeIndex(indexBytes []byte) (uint64, error) {
|
||||
return binaryserialization.DeserializeUint64(indexBytes)
|
||||
func (hscs *headersSelectedChainStore) deserializeIndex(indexBytes []byte) uint64 {
|
||||
return binaryserialization.DeserializeChainBlockIndex(indexBytes)
|
||||
}
|
||||
|
||||
func (hscs *headersSelectedChainStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
@@ -160,11 +223,7 @@ func (hscs *headersSelectedChainStore) highestChainBlockIndex(dbContext model.DB
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
index, err := hscs.deserializeIndex(indexBytes)
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
index := hscs.deserializeIndex(indexBytes)
|
||||
hscs.cacheHighestChainBlockIndex = index
|
||||
return index, true, nil
|
||||
}
|
||||
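The hunks above swap `SerializeUint64`/`DeserializeUint64` for `SerializeChainBlockIndex`/`DeserializeChainBlockIndex`, and the new deserializer returns no error, which is why `GetIndexByHash` and `highestChainBlockIndex` drop their error handling. A plausible encoding that justifies an error-free deserializer is a fixed-width 8-byte integer; the helpers below are an illustrative sketch under that assumption (including the choice of little-endian), not the actual `binaryserialization` implementation.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// serializeChainBlockIndex sketches a fixed-width encoding: 8 bytes, little-endian
// (the width and endianness here are assumptions).
func serializeChainBlockIndex(index uint64) []byte {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], index)
	return buf[:]
}

// deserializeChainBlockIndex is total for well-formed 8-byte inputs, which is why
// it can omit the error return that DeserializeUint64 needed.
func deserializeChainBlockIndex(indexBytes []byte) uint64 {
	return binary.LittleEndian.Uint64(indexBytes)
}

func main() {
	b := serializeChainBlockIndex(42)
	fmt.Println(deserializeChainBlockIndex(b)) // 42
}
```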
@@ -1,42 +0,0 @@
|
||||
package headersselectedtipstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type headersSelectedTipStagingShard struct {
|
||||
store *headerSelectedTipStore
|
||||
newSelectedTip *externalapi.DomainHash
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedTipStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedTip, func() model.StagingShard {
|
||||
return &headersSelectedTipStagingShard{
|
||||
store: hsts,
|
||||
newSelectedTip: nil,
|
||||
}
|
||||
}).(*headersSelectedTipStagingShard)
|
||||
}
|
||||
|
||||
func (hstss *headersSelectedTipStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
if hstss.newSelectedTip == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
selectedTipBytes, err := hstss.store.serializeHeadersSelectedTip(hstss.newSelectedTip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(headerSelectedTipKey, selectedTipBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hstss.store.cache = hstss.newSelectedTip
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hstss *headersSelectedTipStagingShard) isStaged() bool {
|
||||
return hstss.newSelectedTip != nil
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
package headersselectedtipstore
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
var headerSelectedTipKey = database.MakeBucket(nil).Key([]byte("headers-selected-tip"))
|
||||
|
||||
type headerSelectedTipStore struct {
|
||||
cache *externalapi.DomainHash
|
||||
}
|
||||
|
||||
// New instantiates a new HeaderSelectedTipStore
|
||||
func New() model.HeaderSelectedTipStore {
|
||||
return &headerSelectedTipStore{}
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) Has(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) {
|
||||
stagingShard := hsts.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.newSelectedTip != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if hsts.cache != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return dbContext.Has(headerSelectedTipKey)
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) Stage(stagingArea *model.StagingArea, selectedTip *externalapi.DomainHash) {
|
||||
stagingShard := hsts.stagingShard(stagingArea)
|
||||
stagingShard.newSelectedTip = selectedTip
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return hsts.stagingShard(stagingArea).isStaged()
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) HeadersSelectedTip(dbContext model.DBReader, stagingArea *model.StagingArea) (
|
||||
*externalapi.DomainHash, error) {
|
||||
|
||||
stagingShard := hsts.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.newSelectedTip != nil {
|
||||
return stagingShard.newSelectedTip, nil
|
||||
}
|
||||
|
||||
if hsts.cache != nil {
|
||||
return hsts.cache, nil
|
||||
}
|
||||
|
||||
selectedTipBytes, err := dbContext.Get(headerSelectedTipKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selectedTip, err := hsts.deserializeHeadersSelectedTip(selectedTipBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hsts.cache = selectedTip
|
||||
return hsts.cache, nil
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) serializeHeadersSelectedTip(selectedTip *externalapi.DomainHash) ([]byte, error) {
|
||||
return proto.Marshal(serialization.DomainHashToDbHash(selectedTip))
|
||||
}
|
||||
|
||||
func (hsts *headerSelectedTipStore) deserializeHeadersSelectedTip(selectedTipBytes []byte) (*externalapi.DomainHash, error) {
|
||||
dbHash := &serialization.DbHash{}
|
||||
err := proto.Unmarshal(selectedTipBytes, dbHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serialization.DbHashToDomainHash(dbHash)
|
||||
}
|
||||
@@ -0,0 +1,100 @@
|
||||
package headersselectedtipstore
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
var headerSelectedTipKey = database.MakeBucket(nil).Key([]byte("headers-selected-tip"))
|
||||
|
||||
type headerSelectedTipStore struct {
|
||||
staging *externalapi.DomainHash
|
||||
cache *externalapi.DomainHash
|
||||
}
|
||||
|
||||
// New instantiates a new HeaderSelectedTipStore
|
||||
func New() model.HeaderSelectedTipStore {
|
||||
return &headerSelectedTipStore{}
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Has(dbContext model.DBReader) (bool, error) {
|
||||
if hts.staging != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if hts.cache != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return dbContext.Has(headerSelectedTipKey)
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Discard() {
|
||||
hts.staging = nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Commit(dbTx model.DBTransaction) error {
|
||||
if hts.staging == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
selectedTipBytes, err := hts.serializeHeadersSelectedTip(hts.staging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(headerSelectedTipKey, selectedTipBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hts.cache = hts.staging
|
||||
|
||||
hts.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) Stage(selectedTip *externalapi.DomainHash) {
|
||||
hts.staging = selectedTip
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) IsStaged() bool {
|
||||
return hts.staging != nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) HeadersSelectedTip(dbContext model.DBReader) (*externalapi.DomainHash, error) {
|
||||
if hts.staging != nil {
|
||||
return hts.staging, nil
|
||||
}
|
||||
|
||||
if hts.cache != nil {
|
||||
return hts.cache, nil
|
||||
}
|
||||
|
||||
selectedTipBytes, err := dbContext.Get(headerSelectedTipKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selectedTip, err := hts.deserializeHeadersSelectedTip(selectedTipBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hts.cache = selectedTip
|
||||
return hts.cache, nil
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) serializeHeadersSelectedTip(selectedTip *externalapi.DomainHash) ([]byte, error) {
|
||||
return proto.Marshal(serialization.DomainHashToDbHash(selectedTip))
|
||||
}
|
||||
|
||||
func (hts *headerSelectedTipStore) deserializeHeadersSelectedTip(selectedTipBytes []byte) (*externalapi.DomainHash, error) {
|
||||
dbHash := &serialization.DbHash{}
|
||||
err := proto.Unmarshal(selectedTipBytes, dbHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serialization.DbHashToDomainHash(dbHash)
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package multisetstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type multisetStagingShard struct {
|
||||
store *multisetStore
|
||||
toAdd map[externalapi.DomainHash]model.Multiset
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
}
|
||||
|
||||
func (ms *multisetStore) stagingShard(stagingArea *model.StagingArea) *multisetStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDMultiset, func() model.StagingShard {
|
||||
return &multisetStagingShard{
|
||||
store: ms,
|
||||
toAdd: make(map[externalapi.DomainHash]model.Multiset),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
}
|
||||
}).(*multisetStagingShard)
|
||||
}
|
||||
|
||||
func (mss *multisetStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, multiset := range mss.toAdd {
|
||||
multisetBytes, err := mss.store.serializeMultiset(multiset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(mss.store.hashAsKey(&hash), multisetBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mss.store.cache.Add(&hash, multiset)
|
||||
}
|
||||
|
||||
for hash := range mss.toDelete {
|
||||
err := dbTx.Delete(mss.store.hashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mss.store.cache.Remove(&hash)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mss *multisetStagingShard) isStaged() bool {
|
||||
return len(mss.toAdd) != 0 || len(mss.toDelete) != 0
|
||||
}
|
||||
@@ -13,32 +13,62 @@ var bucket = database.MakeBucket([]byte("multisets"))
|
||||
|
||||
// multisetStore represents a store of Multisets
|
||||
type multisetStore struct {
|
||||
cache *lrucache.LRUCache
|
||||
staging map[externalapi.DomainHash]model.Multiset
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
cache *lrucache.LRUCache
|
||||
}
|
||||
|
||||
// New instantiates a new MultisetStore
|
||||
func New(cacheSize int, preallocate bool) model.MultisetStore {
|
||||
return &multisetStore{
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
staging: make(map[externalapi.DomainHash]model.Multiset),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
cache: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
// Stage stages the given multiset for the given blockHash
|
||||
func (ms *multisetStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, multiset model.Multiset) {
|
||||
stagingShard := ms.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.toAdd[*blockHash] = multiset.Clone()
|
||||
func (ms *multisetStore) Stage(blockHash *externalapi.DomainHash, multiset model.Multiset) {
|
||||
ms.staging[*blockHash] = multiset.Clone()
|
||||
}
|
||||
|
||||
func (ms *multisetStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return ms.stagingShard(stagingArea).isStaged()
|
||||
func (ms *multisetStore) IsStaged() bool {
|
||||
return len(ms.staging) != 0 || len(ms.toDelete) != 0
|
||||
}
|
||||
|
||||
func (ms *multisetStore) Discard() {
|
||||
ms.staging = make(map[externalapi.DomainHash]model.Multiset)
|
||||
ms.toDelete = make(map[externalapi.DomainHash]struct{})
|
||||
}
|
||||
|
||||
func (ms *multisetStore) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, multiset := range ms.staging {
|
||||
multisetBytes, err := ms.serializeMultiset(multiset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(ms.hashAsKey(&hash), multisetBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ms.cache.Add(&hash, multiset)
|
||||
}
|
||||
|
||||
for hash := range ms.toDelete {
|
||||
err := dbTx.Delete(ms.hashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ms.cache.Remove(&hash)
|
||||
}
|
||||
|
||||
ms.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets the multiset associated with the given blockHash
|
||||
func (ms *multisetStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.Multiset, error) {
|
||||
stagingShard := ms.stagingShard(stagingArea)
|
||||
|
||||
if multiset, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
func (ms *multisetStore) Get(dbContext model.DBReader, blockHash *externalapi.DomainHash) (model.Multiset, error) {
|
||||
if multiset, ok := ms.staging[*blockHash]; ok {
|
||||
return multiset.Clone(), nil
|
||||
}
|
||||
|
||||
@@ -60,14 +90,12 @@ func (ms *multisetStore) Get(dbContext model.DBReader, stagingArea *model.Stagin
|
||||
}
|
||||
|
||||
// Delete deletes the multiset associated with the given blockHash
|
||||
func (ms *multisetStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
|
||||
stagingShard := ms.stagingShard(stagingArea)
|
||||
|
||||
if _, ok := stagingShard.toAdd[*blockHash]; ok {
|
||||
delete(stagingShard.toAdd, *blockHash)
|
||||
func (ms *multisetStore) Delete(blockHash *externalapi.DomainHash) {
|
||||
if _, ok := ms.staging[*blockHash]; ok {
|
||||
delete(ms.staging, *blockHash)
|
||||
return
|
||||
}
|
||||
stagingShard.toDelete[*blockHash] = struct{}{}
|
||||
ms.toDelete[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
func (ms *multisetStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
@@ -1,64 +0,0 @@
|
||||
package pruningstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type pruningStagingShard struct {
|
||||
store *pruningStore
|
||||
|
||||
newPruningPoint *externalapi.DomainHash
|
||||
newPruningPointCandidate *externalapi.DomainHash
|
||||
startUpdatingPruningPointUTXOSet bool
|
||||
}
|
||||
|
||||
func (ps *pruningStore) stagingShard(stagingArea *model.StagingArea) *pruningStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDPruning, func() model.StagingShard {
|
||||
return &pruningStagingShard{
|
||||
store: ps,
|
||||
newPruningPoint: nil,
|
||||
newPruningPointCandidate: nil,
|
||||
startUpdatingPruningPointUTXOSet: false,
|
||||
}
|
||||
}).(*pruningStagingShard)
|
||||
}
|
||||
|
||||
func (mss *pruningStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
if mss.newPruningPoint != nil {
|
||||
pruningPointBytes, err := mss.store.serializeHash(mss.newPruningPoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(pruningBlockHashKey, pruningPointBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mss.store.pruningPointCache = mss.newPruningPoint
|
||||
}
|
||||
|
||||
if mss.newPruningPointCandidate != nil {
|
||||
candidateBytes, err := mss.store.serializeHash(mss.newPruningPointCandidate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(candidatePruningPointHashKey, candidateBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mss.store.pruningPointCandidateCache = mss.newPruningPointCandidate
|
||||
}
|
||||
|
||||
if mss.startUpdatingPruningPointUTXOSet {
|
||||
err := dbTx.Put(updatingPruningPointUTXOSetKey, []byte{0})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mss *pruningStagingShard) isStaged() bool {
|
||||
return mss.newPruningPoint != nil || mss.startUpdatingPruningPointUTXOSet
|
||||
}
|
||||
@@ -15,8 +15,12 @@ var updatingPruningPointUTXOSetKey = database.MakeBucket(nil).Key([]byte("updati
|
||||
|
||||
// pruningStore represents a store for the current pruning state
|
||||
type pruningStore struct {
|
||||
pruningPointCache *externalapi.DomainHash
|
||||
pruningPointCandidateCache *externalapi.DomainHash
|
||||
pruningPointStaging *externalapi.DomainHash
|
||||
pruningPointCache *externalapi.DomainHash
|
||||
pruningPointCandidateStaging *externalapi.DomainHash
|
||||
pruningPointCandidateCache *externalapi.DomainHash
|
||||
|
||||
startUpdatingPruningPointUTXOSetStaging bool
|
||||
}
|
||||
|
||||
// New instantiates a new PruningStore
|
||||
@@ -24,17 +28,13 @@ func New() model.PruningStore {
|
||||
return &pruningStore{}
|
||||
}
|
||||
|
||||
func (ps *pruningStore) StagePruningPointCandidate(stagingArea *model.StagingArea, candidate *externalapi.DomainHash) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.newPruningPointCandidate = candidate
|
||||
func (ps *pruningStore) StagePruningPointCandidate(candidate *externalapi.DomainHash) {
|
||||
ps.pruningPointCandidateStaging = candidate
|
||||
}
|
||||
|
||||
func (ps *pruningStore) PruningPointCandidate(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.newPruningPointCandidate != nil {
|
||||
return stagingShard.newPruningPointCandidate, nil
|
||||
func (ps *pruningStore) PruningPointCandidate(dbContext model.DBReader) (*externalapi.DomainHash, error) {
|
||||
if ps.pruningPointCandidateStaging != nil {
|
||||
return ps.pruningPointCandidateStaging, nil
|
||||
}
|
||||
|
||||
if ps.pruningPointCandidateCache != nil {
|
||||
@@ -54,10 +54,8 @@ func (ps *pruningStore) PruningPointCandidate(dbContext model.DBReader, stagingA
|
||||
return candidate, nil
|
||||
}
|
||||
|
||||
func (ps *pruningStore) HasPruningPointCandidate(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.newPruningPointCandidate != nil {
|
||||
func (ps *pruningStore) HasPruningPointCandidate(dbContext model.DBReader) (bool, error) {
|
||||
if ps.pruningPointCandidateStaging != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -69,14 +67,53 @@ func (ps *pruningStore) HasPruningPointCandidate(dbContext model.DBReader, stagi
|
||||
}
|
||||
|
||||
// Stage stages the pruning state
|
||||
func (ps *pruningStore) StagePruningPoint(stagingArea *model.StagingArea, pruningPointBlockHash *externalapi.DomainHash) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.newPruningPoint = pruningPointBlockHash
|
||||
func (ps *pruningStore) StagePruningPoint(pruningPointBlockHash *externalapi.DomainHash) {
|
||||
ps.pruningPointStaging = pruningPointBlockHash
|
||||
}
|
||||
|
||||
func (ps *pruningStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return ps.stagingShard(stagingArea).isStaged()
|
||||
func (ps *pruningStore) IsStaged() bool {
|
||||
return ps.pruningPointStaging != nil || ps.startUpdatingPruningPointUTXOSetStaging
|
||||
}
|
||||
|
||||
func (ps *pruningStore) Discard() {
|
||||
ps.pruningPointStaging = nil
|
||||
ps.startUpdatingPruningPointUTXOSetStaging = false
|
||||
}
|
||||
|
||||
func (ps *pruningStore) Commit(dbTx model.DBTransaction) error {
|
||||
if ps.pruningPointStaging != nil {
|
||||
pruningPointBytes, err := ps.serializeHash(ps.pruningPointStaging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(pruningBlockHashKey, pruningPointBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ps.pruningPointCache = ps.pruningPointStaging
|
||||
}
|
||||
|
||||
if ps.pruningPointCandidateStaging != nil {
|
||||
candidateBytes, err := ps.serializeHash(ps.pruningPointCandidateStaging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(candidatePruningPointHashKey, candidateBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ps.pruningPointCandidateCache = ps.pruningPointCandidateStaging
|
||||
}
|
||||
|
||||
if ps.startUpdatingPruningPointUTXOSetStaging {
|
||||
err := dbTx.Put(updatingPruningPointUTXOSetKey, []byte{0})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ps.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter,
|
||||
@@ -124,11 +161,9 @@ func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter,
|
||||
}
|
||||
|
||||
// PruningPoint gets the current pruning point
|
||||
func (ps *pruningStore) PruningPoint(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.newPruningPoint != nil {
|
||||
return stagingShard.newPruningPoint, nil
|
||||
func (ps *pruningStore) PruningPoint(dbContext model.DBReader) (*externalapi.DomainHash, error) {
|
||||
if ps.pruningPointStaging != nil {
|
||||
return ps.pruningPointStaging, nil
|
||||
}
|
||||
|
||||
if ps.pruningPointCache != nil {
|
||||
@@ -162,10 +197,8 @@ func (ps *pruningStore) deserializePruningPoint(pruningPointBytes []byte) (*exte
|
||||
return serialization.DbHashToDomainHash(dbHash)
|
||||
}
|
||||
|
||||
func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.newPruningPoint != nil {
|
||||
func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader) (bool, error) {
|
||||
if ps.pruningPointStaging != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -214,10 +247,8 @@ func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
|
||||
return outpointAndUTXOEntryPairs, nil
|
||||
}
|
||||
|
||||
func (ps *pruningStore) StageStartUpdatingPruningPointUTXOSet(stagingArea *model.StagingArea) {
|
||||
stagingShard := ps.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.startUpdatingPruningPointUTXOSet = true
|
||||
func (ps *pruningStore) StageStartUpdatingPruningPointUTXOSet() {
|
||||
ps.startUpdatingPruningPointUTXOSetStaging = true
|
||||
}
|
||||
|
||||
func (ps *pruningStore) HadStartedUpdatingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) {
|
||||
@@ -1,53 +0,0 @@
|
||||
package reachabilitydatastore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type reachabilityDataStagingShard struct {
|
||||
store *reachabilityDataStore
|
||||
reachabilityData map[externalapi.DomainHash]model.ReachabilityData
|
||||
reachabilityReindexRoot *externalapi.DomainHash
|
||||
}
|
||||
|
||||
func (rds *reachabilityDataStore) stagingShard(stagingArea *model.StagingArea) *reachabilityDataStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDReachabilityData, func() model.StagingShard {
|
||||
return &reachabilityDataStagingShard{
|
||||
store: rds,
|
||||
reachabilityData: make(map[externalapi.DomainHash]model.ReachabilityData),
|
||||
reachabilityReindexRoot: nil,
|
||||
}
|
||||
}).(*reachabilityDataStagingShard)
|
||||
}
|
||||
|
||||
func (rdss *reachabilityDataStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
if rdss.reachabilityReindexRoot != nil {
|
||||
reachabilityReindexRootBytes, err := rdss.store.serializeReachabilityReindexRoot(rdss.reachabilityReindexRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(reachabilityReindexRootKey, reachabilityReindexRootBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rdss.store.reachabilityReindexRootCache = rdss.reachabilityReindexRoot
|
||||
}
|
||||
for hash, reachabilityData := range rdss.reachabilityData {
|
||||
reachabilityDataBytes, err := rdss.store.serializeReachabilityData(reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(rdss.store.reachabilityDataBlockHashAsKey(&hash), reachabilityDataBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rdss.store.reachabilityDataCache.Add(&hash, reachabilityData)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rdss *reachabilityDataStagingShard) isStaged() bool {
|
||||
return len(rdss.reachabilityData) != 0 || rdss.reachabilityReindexRoot != nil
|
||||
}
|
||||
@@ -14,40 +14,74 @@ var reachabilityReindexRootKey = database.MakeBucket(nil).Key([]byte("reachabili
|
||||
|
||||
// reachabilityDataStore represents a store of ReachabilityData
|
||||
type reachabilityDataStore struct {
|
||||
reachabilityDataCache *lrucache.LRUCache
|
||||
reachabilityReindexRootCache *externalapi.DomainHash
|
||||
reachabilityDataStaging map[externalapi.DomainHash]model.ReachabilityData
|
||||
reachabilityReindexRootStaging *externalapi.DomainHash
|
||||
reachabilityDataCache *lrucache.LRUCache
|
||||
reachabilityReindexRootCache *externalapi.DomainHash
|
||||
}
|
||||
|
||||
// New instantiates a new ReachabilityDataStore
|
||||
func New(cacheSize int, preallocate bool) model.ReachabilityDataStore {
|
||||
return &reachabilityDataStore{
|
||||
reachabilityDataCache: lrucache.New(cacheSize, preallocate),
|
||||
reachabilityDataStaging: make(map[externalapi.DomainHash]model.ReachabilityData),
|
||||
reachabilityDataCache: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
// StageReachabilityData stages the given reachabilityData for the given blockHash
|
||||
func (rds *reachabilityDataStore) StageReachabilityData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, reachabilityData model.ReachabilityData) {
|
||||
stagingShard := rds.stagingShard(stagingArea)
|
||||
func (rds *reachabilityDataStore) StageReachabilityData(blockHash *externalapi.DomainHash,
|
||||
reachabilityData model.ReachabilityData) {
|
||||
|
||||
stagingShard.reachabilityData[*blockHash] = reachabilityData
|
||||
rds.reachabilityDataStaging[*blockHash] = reachabilityData
|
||||
}
|
||||
|
||||
// StageReachabilityReindexRoot stages the given reachabilityReindexRoot
|
||||
func (rds *reachabilityDataStore) StageReachabilityReindexRoot(stagingArea *model.StagingArea, reachabilityReindexRoot *externalapi.DomainHash) {
|
||||
stagingShard := rds.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.reachabilityReindexRoot = reachabilityReindexRoot
|
||||
func (rds *reachabilityDataStore) StageReachabilityReindexRoot(reachabilityReindexRoot *externalapi.DomainHash) {
|
||||
rds.reachabilityReindexRootStaging = reachabilityReindexRoot
|
||||
}
|
||||
|
||||
func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return rds.stagingShard(stagingArea).isStaged()
|
||||
func (rds *reachabilityDataStore) IsAnythingStaged() bool {
|
||||
return len(rds.reachabilityDataStaging) != 0 || rds.reachabilityReindexRootStaging != nil
|
||||
}
|
||||
|
||||
func (rds *reachabilityDataStore) Discard() {
|
||||
rds.reachabilityDataStaging = make(map[externalapi.DomainHash]model.ReachabilityData)
|
||||
rds.reachabilityReindexRootStaging = nil
|
||||
}
|
||||
|
||||
func (rds *reachabilityDataStore) Commit(dbTx model.DBTransaction) error {
|
||||
if rds.reachabilityReindexRootStaging != nil {
|
||||
reachabilityReindexRootBytes, err := rds.serializeReachabilityReindexRoot(rds.reachabilityReindexRootStaging)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(reachabilityReindexRootKey, reachabilityReindexRootBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rds.reachabilityReindexRootCache = rds.reachabilityReindexRootStaging
|
||||
}
|
||||
for hash, reachabilityData := range rds.reachabilityDataStaging {
|
||||
reachabilityDataBytes, err := rds.serializeReachabilityData(reachabilityData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(rds.reachabilityDataBlockHashAsKey(&hash), reachabilityDataBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rds.reachabilityDataCache.Add(&hash, reachabilityData)
|
||||
}
|
||||
|
||||
rds.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReachabilityData returns the reachabilityData associated with the given blockHash
|
||||
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
|
||||
stagingShard := rds.stagingShard(stagingArea)
|
||||
func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader,
|
||||
blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
|
||||
|
||||
if reachabilityData, ok := stagingShard.reachabilityData[*blockHash]; ok {
|
||||
if reachabilityData, ok := rds.reachabilityDataStaging[*blockHash]; ok {
|
||||
return reachabilityData, nil
|
||||
}
|
||||
|
||||
@@ -68,10 +102,8 @@ func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, sta
|
||||
return reachabilityData, nil
|
||||
}
|
||||
|
||||
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
|
||||
stagingShard := rds.stagingShard(stagingArea)
|
||||
|
||||
if _, ok := stagingShard.reachabilityData[*blockHash]; ok {
|
||||
func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {
|
||||
if _, ok := rds.reachabilityDataStaging[*blockHash]; ok {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -83,11 +115,9 @@ func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader,
|
||||
}
|
||||
|
||||
// ReachabilityReindexRoot returns the current reachability reindex root
|
||||
func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
|
||||
stagingShard := rds.stagingShard(stagingArea)
|
||||
|
||||
if stagingShard.reachabilityReindexRoot != nil {
|
||||
return stagingShard.reachabilityReindexRoot, nil
|
||||
func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBReader) (*externalapi.DomainHash, error) {
|
||||
if rds.reachabilityReindexRootStaging != nil {
|
||||
return rds.reachabilityReindexRootStaging, nil
|
||||
}
|
||||
|
||||
if rds.reachabilityReindexRootCache != nil {
|
||||
@@ -1,74 +0,0 @@
|
||||
package utxodiffstore
|
||||
|
||||
import (
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model"
|
||||
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
|
||||
)
|
||||
|
||||
type utxoDiffStagingShard struct {
|
||||
store *utxoDiffStore
|
||||
utxoDiffToAdd map[externalapi.DomainHash]externalapi.UTXODiff
|
||||
utxoDiffChildToAdd map[externalapi.DomainHash]*externalapi.DomainHash
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) stagingShard(stagingArea *model.StagingArea) *utxoDiffStagingShard {
|
||||
return stagingArea.GetOrCreateShard(model.StagingShardIDUTXODiff, func() model.StagingShard {
|
||||
return &utxoDiffStagingShard{
|
||||
store: uds,
|
||||
utxoDiffToAdd: make(map[externalapi.DomainHash]externalapi.UTXODiff),
|
||||
utxoDiffChildToAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
}
|
||||
}).(*utxoDiffStagingShard)
|
||||
}
|
||||
|
||||
func (udss *utxoDiffStagingShard) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, utxoDiff := range udss.utxoDiffToAdd {
|
||||
utxoDiffBytes, err := udss.store.serializeUTXODiff(utxoDiff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(udss.store.utxoDiffHashAsKey(&hash), utxoDiffBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
udss.store.utxoDiffCache.Add(&hash, utxoDiff)
|
||||
}
|
||||
|
||||
for hash, utxoDiffChild := range udss.utxoDiffChildToAdd {
|
||||
if utxoDiffChild == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
utxoDiffChildBytes, err := udss.store.serializeUTXODiffChild(utxoDiffChild)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(udss.store.utxoDiffChildHashAsKey(&hash), utxoDiffChildBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
udss.store.utxoDiffChildCache.Add(&hash, utxoDiffChild)
|
||||
}
|
||||
|
||||
for hash := range udss.toDelete {
|
||||
err := dbTx.Delete(udss.store.utxoDiffHashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
udss.store.utxoDiffCache.Remove(&hash)
|
||||
|
||||
err = dbTx.Delete(udss.store.utxoDiffChildHashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
udss.store.utxoDiffChildCache.Remove(&hash)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (udss *utxoDiffStagingShard) isStaged() bool {
|
||||
return len(udss.utxoDiffToAdd) != 0 || len(udss.utxoDiffChildToAdd) != 0 || len(udss.toDelete) != 0
|
||||
}
|
||||
@@ -15,46 +15,100 @@ var utxoDiffChildBucket = database.MakeBucket([]byte("utxo-diff-children"))
|
||||
|
||||
// utxoDiffStore represents a store of UTXODiffs
|
||||
type utxoDiffStore struct {
|
||||
utxoDiffCache *lrucache.LRUCache
|
||||
utxoDiffChildCache *lrucache.LRUCache
|
||||
utxoDiffStaging map[externalapi.DomainHash]externalapi.UTXODiff
|
||||
utxoDiffChildStaging map[externalapi.DomainHash]*externalapi.DomainHash
|
||||
toDelete map[externalapi.DomainHash]struct{}
|
||||
utxoDiffCache *lrucache.LRUCache
|
||||
utxoDiffChildCache *lrucache.LRUCache
|
||||
}
|
||||
|
||||
// New instantiates a new UTXODiffStore
|
||||
func New(cacheSize int, preallocate bool) model.UTXODiffStore {
|
||||
return &utxoDiffStore{
|
||||
utxoDiffCache: lrucache.New(cacheSize, preallocate),
|
||||
utxoDiffChildCache: lrucache.New(cacheSize, preallocate),
|
||||
utxoDiffStaging: make(map[externalapi.DomainHash]externalapi.UTXODiff),
|
||||
utxoDiffChildStaging: make(map[externalapi.DomainHash]*externalapi.DomainHash),
|
||||
toDelete: make(map[externalapi.DomainHash]struct{}),
|
||||
utxoDiffCache: lrucache.New(cacheSize, preallocate),
|
||||
utxoDiffChildCache: lrucache.New(cacheSize, preallocate),
|
||||
}
|
||||
}
|
||||
|
||||
// Stage stages the given utxoDiff for the given blockHash
|
||||
func (uds *utxoDiffStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, utxoDiff externalapi.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
|
||||
stagingShard := uds.stagingShard(stagingArea)
|
||||
|
||||
stagingShard.utxoDiffToAdd[*blockHash] = utxoDiff
|
||||
func (uds *utxoDiffStore) Stage(blockHash *externalapi.DomainHash, utxoDiff externalapi.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
|
||||
uds.utxoDiffStaging[*blockHash] = utxoDiff
|
||||
|
||||
if utxoDiffChild != nil {
|
||||
stagingShard.utxoDiffChildToAdd[*blockHash] = utxoDiffChild
|
||||
uds.utxoDiffChildStaging[*blockHash] = utxoDiffChild
|
||||
}
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) IsStaged(stagingArea *model.StagingArea) bool {
|
||||
return uds.stagingShard(stagingArea).isStaged()
|
||||
func (uds *utxoDiffStore) IsStaged() bool {
|
||||
return len(uds.utxoDiffStaging) != 0 || len(uds.utxoDiffChildStaging) != 0 || len(uds.toDelete) != 0
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) isBlockHashStaged(stagingShard *utxoDiffStagingShard, blockHash *externalapi.DomainHash) bool {
|
||||
if _, ok := stagingShard.utxoDiffToAdd[*blockHash]; ok {
|
||||
func (uds *utxoDiffStore) IsBlockHashStaged(blockHash *externalapi.DomainHash) bool {
|
||||
if _, ok := uds.utxoDiffStaging[*blockHash]; ok {
|
||||
return true
|
||||
}
|
||||
_, ok := stagingShard.utxoDiffChildToAdd[*blockHash]
|
||||
_, ok := uds.utxoDiffChildStaging[*blockHash]
|
||||
return ok
|
||||
}
|
||||
|
||||
// UTXODiff gets the utxoDiff associated with the given blockHash
|
||||
func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.UTXODiff, error) {
|
||||
stagingShard := uds.stagingShard(stagingArea)
|
||||
func (uds *utxoDiffStore) Discard() {
|
||||
uds.utxoDiffStaging = make(map[externalapi.DomainHash]externalapi.UTXODiff)
|
||||
uds.utxoDiffChildStaging = make(map[externalapi.DomainHash]*externalapi.DomainHash)
|
||||
uds.toDelete = make(map[externalapi.DomainHash]struct{})
|
||||
}
|
||||
|
||||
if utxoDiff, ok := stagingShard.utxoDiffToAdd[*blockHash]; ok {
|
||||
func (uds *utxoDiffStore) Commit(dbTx model.DBTransaction) error {
|
||||
for hash, utxoDiff := range uds.utxoDiffStaging {
|
||||
utxoDiffBytes, err := uds.serializeUTXODiff(utxoDiff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(uds.utxoDiffHashAsKey(&hash), utxoDiffBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uds.utxoDiffCache.Add(&hash, utxoDiff)
|
||||
}
|
||||
for hash, utxoDiffChild := range uds.utxoDiffChildStaging {
|
||||
if utxoDiffChild == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
utxoDiffChildBytes, err := uds.serializeUTXODiffChild(utxoDiffChild)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dbTx.Put(uds.utxoDiffChildHashAsKey(&hash), utxoDiffChildBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uds.utxoDiffChildCache.Add(&hash, utxoDiffChild)
|
||||
}
|
||||
|
||||
for hash := range uds.toDelete {
|
||||
err := dbTx.Delete(uds.utxoDiffHashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uds.utxoDiffCache.Remove(&hash)
|
||||
|
||||
err = dbTx.Delete(uds.utxoDiffChildHashAsKey(&hash))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uds.utxoDiffChildCache.Remove(&hash)
|
||||
}
|
||||
|
||||
uds.Discard()
|
||||
return nil
|
||||
}
|
||||
|
||||
// UTXODiff gets the utxoDiff associated with the given blockHash
|
||||
func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, blockHash *externalapi.DomainHash) (externalapi.UTXODiff, error) {
|
||||
if utxoDiff, ok := uds.utxoDiffStaging[*blockHash]; ok {
|
||||
return utxoDiff, nil
|
||||
}
|
||||
|
||||
@@ -76,10 +130,8 @@ func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, stagingArea *model.
|
||||
}
|
||||
|
||||
// UTXODiffChild gets the utxoDiff child associated with the given blockHash
|
||||
func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
|
||||
stagingShard := uds.stagingShard(stagingArea)
|
||||
|
||||
if utxoDiffChild, ok := stagingShard.utxoDiffChildToAdd[*blockHash]; ok {
|
||||
func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
|
||||
if utxoDiffChild, ok := uds.utxoDiffChildStaging[*blockHash]; ok {
|
||||
return utxoDiffChild, nil
|
||||
}
|
||||
|
||||
@@ -101,10 +153,8 @@ func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, stagingArea *m
|
||||
}
|
||||
|
||||
// HasUTXODiffChild returns true if the given blockHash has a UTXODiffChild
|
||||
func (uds *utxoDiffStore) HasUTXODiffChild(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
|
||||
stagingShard := uds.stagingShard(stagingArea)
|
||||
|
||||
if _, ok := stagingShard.utxoDiffChildToAdd[*blockHash]; ok {
|
||||
func (uds *utxoDiffStore) HasUTXODiffChild(dbContext model.DBReader, blockHash *externalapi.DomainHash) (bool, error) {
|
||||
if _, ok := uds.utxoDiffChildStaging[*blockHash]; ok {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -116,19 +166,17 @@ func (uds *utxoDiffStore) HasUTXODiffChild(dbContext model.DBReader, stagingArea
|
||||
}
|
||||
|
||||
// Delete deletes the utxoDiff associated with the given blockHash
|
||||
func (uds *utxoDiffStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
|
||||
stagingShard := uds.stagingShard(stagingArea)
|
||||
|
||||
if uds.isBlockHashStaged(stagingShard, blockHash) {
|
||||
if _, ok := stagingShard.utxoDiffToAdd[*blockHash]; ok {
|
||||
delete(stagingShard.utxoDiffToAdd, *blockHash)
|
||||
func (uds *utxoDiffStore) Delete(blockHash *externalapi.DomainHash) {
|
||||
if uds.IsBlockHashStaged(blockHash) {
|
||||
if _, ok := uds.utxoDiffStaging[*blockHash]; ok {
|
||||
delete(uds.utxoDiffStaging, *blockHash)
|
||||
}
|
||||
if _, ok := stagingShard.utxoDiffChildToAdd[*blockHash]; ok {
|
||||
delete(stagingShard.utxoDiffChildToAdd, *blockHash)
|
||||
if _, ok := uds.utxoDiffChildStaging[*blockHash]; ok {
|
||||
delete(uds.utxoDiffChildStaging, *blockHash)
|
||||
}
|
||||
return
|
||||
}
|
||||
stagingShard.toDelete[*blockHash] = struct{}{}
|
||||
uds.toDelete[*blockHash] = struct{}{}
|
||||
}
|
||||
|
||||
func (uds *utxoDiffStore) utxoDiffHashAsKey(hash *externalapi.DomainHash) model.DBKey {
|
||||
@@ -5,8 +5,6 @@ import (
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
daablocksstore "github.com/kaspanet/kaspad/domain/consensus/datastructures/daablocksstore"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/datastructures/headersselectedchainstore"
|
||||
|
||||
"github.com/kaspanet/kaspad/domain/consensus/processes/dagtraversalmanager"
|
||||
@@ -124,20 +122,10 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
reachabilityDataStore := reachabilitydatastore.New(pruningWindowSizePlusFinalityDepthForCache, preallocateCaches)
|
||||
utxoDiffStore := utxodiffstore.New(200, preallocateCaches)
|
||||
consensusStateStore := consensusstatestore.New(10_000, preallocateCaches)
|
||||
|
||||
// Some tests artificially decrease the pruningWindowSize, thus making the GhostDagStore cache too small for a
|
||||
// a single DifficultyAdjustmentWindow. To alleviate this problem we make sure that the cache size is at least
|
||||
// dagParams.DifficultyAdjustmentWindowSize
|
||||
ghostdagDataCacheSize := pruningWindowSizeForCaches
|
||||
if ghostdagDataCacheSize < dagParams.DifficultyAdjustmentWindowSize {
|
||||
ghostdagDataCacheSize = dagParams.DifficultyAdjustmentWindowSize
|
||||
}
|
||||
ghostdagDataStore := ghostdagdatastore.New(ghostdagDataCacheSize, preallocateCaches)
|
||||
|
||||
ghostdagDataStore := ghostdagdatastore.New(pruningWindowSizeForCaches, preallocateCaches)
|
||||
headersSelectedTipStore := headersselectedtipstore.New()
|
||||
finalityStore := finalitystore.New(200, preallocateCaches)
|
||||
headersSelectedChainStore := headersselectedchainstore.New(pruningWindowSizeForCaches, preallocateCaches)
|
||||
daaBlocksStore := daablocksstore.New(pruningWindowSizeForCaches, int(dagParams.FinalityDepth()), preallocateCaches)
|
||||
|
||||
// Processes
|
||||
reachabilityManager := reachabilitymanager.New(
|
||||
@@ -161,15 +149,13 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
ghostdagDataStore,
|
||||
reachabilityDataStore,
|
||||
ghostdagManager,
|
||||
consensusStateStore,
|
||||
dagParams.GenesisHash)
|
||||
consensusStateStore)
|
||||
pastMedianTimeManager := f.pastMedianTimeConsructor(
|
||||
dagParams.TimestampDeviationTolerance,
|
||||
dbManager,
|
||||
dagTraversalManager,
|
||||
blockHeaderStore,
|
||||
ghostdagDataStore,
|
||||
dagParams.GenesisHash)
|
||||
ghostdagDataStore)
|
||||
transactionValidator := transactionvalidator.New(dagParams.BlockCoinbaseMaturity,
|
||||
dagParams.EnableNonNativeSubnetworks,
|
||||
dagParams.MassPerTxByte,
|
||||
@@ -178,14 +164,12 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
dagParams.MaxCoinbasePayloadLength,
|
||||
dbManager,
|
||||
pastMedianTimeManager,
|
||||
ghostdagDataStore,
|
||||
daaBlocksStore)
|
||||
ghostdagDataStore)
|
||||
difficultyManager := f.difficultyConstructor(
|
||||
dbManager,
|
||||
ghostdagManager,
|
||||
ghostdagDataStore,
|
||||
blockHeaderStore,
|
||||
daaBlocksStore,
|
||||
dagTopologyManager,
|
||||
dagTraversalManager,
|
||||
dagParams.PowMax,
|
||||
@@ -199,8 +183,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
dagParams.BaseSubsidy,
|
||||
dagParams.CoinbasePayloadScriptPublicKeyMaxLength,
|
||||
ghostdagDataStore,
|
||||
acceptanceDataStore,
|
||||
daaBlocksStore)
|
||||
acceptanceDataStore)
|
||||
headerTipsManager := headersselectedtipmanager.New(dbManager, dagTopologyManager, dagTraversalManager,
|
||||
ghostdagManager, headersSelectedTipStore, headersSelectedChainStore)
|
||||
genesisHash := dagParams.GenesisHash
|
||||
@@ -265,7 +248,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
coinbaseManager,
|
||||
mergeDepthManager,
|
||||
finalityManager,
|
||||
difficultyManager,
|
||||
|
||||
blockStatusStore,
|
||||
ghostdagDataStore,
|
||||
@@ -277,8 +259,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
acceptanceDataStore,
|
||||
blockHeaderStore,
|
||||
headersSelectedTipStore,
|
||||
pruningStore,
|
||||
daaBlocksStore)
|
||||
pruningStore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -298,7 +279,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
blockStore,
|
||||
blockHeaderStore,
|
||||
utxoDiffStore,
|
||||
daaBlocksStore,
|
||||
isArchivalNode,
|
||||
genesisHash,
|
||||
dagParams.FinalityDepth(),
|
||||
@@ -363,8 +343,7 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
blockHeaderStore,
|
||||
headersSelectedTipStore,
|
||||
finalityStore,
|
||||
headersSelectedChainStore,
|
||||
daaBlocksStore)
|
||||
headersSelectedChainStore)
|
||||
|
||||
c := &consensus{
|
||||
lock: &sync.Mutex{},
|
||||
@@ -402,7 +381,6 @@ func (f *factory) NewConsensus(dagParams *dagconfig.Params, db infrastructuredat
|
||||
utxoDiffStore: utxoDiffStore,
|
||||
finalityStore: finalityStore,
|
||||
headersSelectedChainStore: headersSelectedChainStore,
|
||||
daaBlocksStore: daaBlocksStore,
|
||||
}
|
||||
|
||||
genesisInfo, err := c.GetBlockInfo(genesisHash)
|
||||
|
||||
@@ -83,8 +83,6 @@ func TestFinality(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
stagingArea := model.NewStagingArea()
|
||||
|
||||
// Add two more blocks in the side-chain until it becomes the selected chain
|
||||
for i := uint64(0); i < 2; i++ {
|
||||
sideChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{sideChainTipHash})
|
||||
@@ -122,7 +120,7 @@ func TestFinality(t *testing.T) {
|
||||
mainChainTipHash = consensushashing.BlockHash(mainChainTip)
|
||||
}
|
||||
|
||||
virtualFinality, err := consensus.FinalityManager().VirtualFinalityPoint(stagingArea)
|
||||
virtualFinality, err := consensus.FinalityManager().VirtualFinalityPoint()
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting the virtual's finality point: %v", err)
|
||||
}
|
||||
@@ -147,14 +145,12 @@ func TestFinality(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting virtual selectedParent: %v", err)
|
||||
}
|
||||
selectedTipGhostDagData, err :=
|
||||
consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), stagingArea, selectedTip)
|
||||
selectedTipGhostDagData, err := consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), selectedTip)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting the ghost dag data of the selected tip: %v", err)
|
||||
}
|
||||
|
||||
sideChainTipGhostDagData, err :=
|
||||
consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), stagingArea, sideChainTipHash)
|
||||
sideChainTipGhostDagData, err := consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), sideChainTipHash)
|
||||
if err != nil {
|
||||
t.Fatalf("TestFinality: Failed getting the ghost dag data of the sidechain tip: %v", err)
|
||||
}
|
||||
@@ -306,9 +302,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
t.Fatalf("TestBoundedMergeDepth: Expected blueKosherizingBlock to not violate merge depth")
|
||||
}
|
||||
|
||||
stagingArea := model.NewStagingArea()
|
||||
virtualGhotDagData, err := consensusReal.GHOSTDAGDataStore().Get(consensusReal.DatabaseContext(),
|
||||
stagingArea, model.VirtualBlockHash)
|
||||
virtualGhotDagData, err := consensusReal.GHOSTDAGDataStore().Get(consensusReal.DatabaseContext(), model.VirtualBlockHash)
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: Failed getting the ghostdag data of the virtual: %v", err)
|
||||
}
|
||||
@@ -356,8 +350,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", tip, virtualSelectedParent)
|
||||
}
|
||||
|
||||
virtualGhotDagData, err = consensusReal.GHOSTDAGDataStore().Get(
|
||||
consensusReal.DatabaseContext(), stagingArea, model.VirtualBlockHash)
|
||||
virtualGhotDagData, err = consensusReal.GHOSTDAGDataStore().Get(consensusReal.DatabaseContext(), model.VirtualBlockHash)
|
||||
if err != nil {
|
||||
t.Fatalf("TestBoundedMergeDepth: Failed getting the ghostdag data of the virtual: %v", err)
|
||||
}
|
||||
@@ -379,8 +372,7 @@ func TestBoundedMergeDepth(t *testing.T) {
|
||||
}
|
||||
|
||||
// Now `pointAtBlueKosherizing` itself is actually still blue, so we can still point at that even though we can't point at kosherizing directly anymore
|
||||
transitiveBlueKosherizing, isViolatingMergeDepth :=
|
||||
checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(pointAtBlueKosherizing), tip})
|
||||
transitiveBlueKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(pointAtBlueKosherizing), tip})
|
||||
if isViolatingMergeDepth {
|
||||
t.Fatalf("TestBoundedMergeDepth: Expected transitiveBlueKosherizing to not violate merge depth")
|
||||
}
|
||||
|
||||
@@ -26,9 +26,14 @@ func initTestTransactionAcceptanceDataForClone() []*externalapi.TransactionAccep
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -70,9 +75,14 @@ func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStru
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -99,9 +109,14 @@ func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStru
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -127,9 +142,14 @@ func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStru
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -155,9 +175,14 @@ func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStru
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -184,9 +209,14 @@ func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStru
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -214,9 +244,14 @@ func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStru
|
||||
LockTime: 1,
|
||||
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
|
||||
Gas: 1,
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
Payload: []byte{0x01},
|
||||
Fee: 0,
|
||||
Mass: 1,
|
||||
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@@ -319,6 +354,11 @@ func initTestBlockAcceptanceDataForClone() []*externalapi.BlockAcceptanceData {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -366,6 +406,11 @@ func iniBlockAcceptanceDataForEqual() []testBlockAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -398,6 +443,11 @@ func iniBlockAcceptanceDataForEqual() []testBlockAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -430,6 +480,11 @@ func iniBlockAcceptanceDataForEqual() []testBlockAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -462,6 +517,11 @@ func iniBlockAcceptanceDataForEqual() []testBlockAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -495,6 +555,11 @@ func iniBlockAcceptanceDataForEqual() []testBlockAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -598,6 +663,11 @@ func initTestAcceptanceDataForClone() []externalapi.AcceptanceData {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -647,6 +717,11 @@ func initAcceptanceDataForEqual() []testAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -679,6 +754,11 @@ func initAcceptanceDataForEqual() []testAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -711,6 +791,11 @@ func initAcceptanceDataForEqual() []testAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
@@ -743,6 +828,11 @@ func initAcceptanceDataForEqual() []testAcceptanceDataStruct {
|
||||
1,
|
||||
externalapi.DomainSubnetworkID{0x01},
|
||||
1,
|
||||
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
|
||||
[]byte{0x01},
|
||||
0,
|
||||
1,
|
||||
|
||||
@@ -26,9 +26,14 @@ func initTestBaseTransactions() []*externalapi.DomainTransaction {
LockTime: 1,
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
Gas: 1,
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -47,9 +52,14 @@ func initTestAnotherTransactions() []*externalapi.DomainTransaction {
LockTime: 1,
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
Gas: 1,
Payload: []byte{0x02},
Fee: 0,
Mass: 1,
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -68,9 +78,14 @@ func initTestTwoTransactions() []*externalapi.DomainTransaction {
LockTime: 1,
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
Gas: 1,
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -83,9 +98,14 @@ func initTestTwoTransactions() []*externalapi.DomainTransaction {
LockTime: 1,
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
Gas: 1,
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

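In each of the fixture hunks above, the four-line hash literal inserted between Gas and Payload corresponds to the PayloadHash field added to DomainTransaction later in this diff. A sketch of the same fixture written with field names instead of positional values; buildBaseTestTransaction is an illustrative name and the all-zero hash mirrors the fixture, not any required value.

// Sketch only, assuming the DomainTransaction definition that includes PayloadHash.
package example

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

func buildBaseTestTransaction() *externalapi.DomainTransaction {
	return &externalapi.DomainTransaction{
		LockTime:     1,
		SubnetworkID: externalapi.DomainSubnetworkID{0x01},
		Gas:          1,
		// All-zero payload hash, as in the fixtures above.
		PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{}),
		Payload:     []byte{0x01},
		Fee:         0,
		Mass:        1,
	}
}
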
@@ -9,10 +9,10 @@ type Consensus interface {
GetBlock(blockHash *DomainHash) (*DomainBlock, error)
GetBlockHeader(blockHash *DomainHash) (BlockHeader, error)
GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error)
GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, selectedParent *DomainHash, children []*DomainHash, err error)
GetBlockChildren(blockHash *DomainHash) ([]*DomainHash, error)
GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error)

GetHashesBetween(lowHash, highHash *DomainHash, maxBlueScoreDifference uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error)
GetHashesBetween(lowHash, highHash *DomainHash, maxBlueScoreDifference uint64) ([]*DomainHash, error)
GetMissingBlockBodyHashes(highHash *DomainHash) ([]*DomainHash, error)
GetPruningPointUTXOs(expectedPruningPointHash *DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)
GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error)

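The hunk above shows, among other differences, GetBlockChildren replacing GetBlockRelations and GetHashesBetween dropping its actualHighHash return value. A minimal caller sketch against the two-value GetHashesBetween listed above; hashesBetween is an illustrative name, and passing 0 as maxBlueScoreDifference is an assumption standing in for whatever bound the caller wants.

// Sketch only, written against the two-value signature shown in this hunk.
package example

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

func hashesBetween(consensus externalapi.Consensus, low, high *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
	// The bound 0 here is a placeholder assumption, not a value from the diff.
	return consensus.GetHashesBetween(low, high, 0)
}
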
@@ -15,11 +15,6 @@ type DomainHash struct {
hashArray [DomainHashSize]byte
}

// NewZeroHash returns a DomainHash that represents the zero value (0x000000...000)
func NewZeroHash() *DomainHash {
return &DomainHash{hashArray: [32]byte{}}
}

// NewDomainHashFromByteArray constructs a new DomainHash out of a byte array
func NewDomainHashFromByteArray(hashBytes *[DomainHashSize]byte) *DomainHash {
return &DomainHash{
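The hunk above keeps NewDomainHashFromByteArray on both sides of the comparison while NewZeroHash appears on only one of them. A small usage sketch of the two constructors as documented above, taken from the side where both still exist; the function and variable names are illustrative.

// Sketch only: builds a zero hash and a hash whose last byte is 0x01.
package example

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

func exampleHashes() (*externalapi.DomainHash, *externalapi.DomainHash) {
	zero := externalapi.NewZeroHash() // represents 0x000000...000

	var raw [externalapi.DomainHashSize]byte
	raw[externalapi.DomainHashSize-1] = 0x01
	one := externalapi.NewDomainHashFromByteArray(&raw)

	return zero, one
}
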
@@ -15,6 +15,7 @@ type DomainTransaction struct {
LockTime uint64
SubnetworkID DomainSubnetworkID
Gas uint64
PayloadHash DomainHash
Payload []byte

Fee uint64
@@ -52,6 +53,7 @@ func (tx *DomainTransaction) Clone() *DomainTransaction {
LockTime: tx.LockTime,
SubnetworkID: *tx.SubnetworkID.Clone(),
Gas: tx.Gas,
PayloadHash: tx.PayloadHash,
Payload: payloadClone,
Fee: tx.Fee,
Mass: tx.Mass,
@@ -62,7 +64,7 @@ func (tx *DomainTransaction) Clone() *DomainTransaction {
// If this doesn't compile, it means the type definition has been changed, so it's
// an indication to update Equal and Clone accordingly.
var _ = DomainTransaction{0, []*DomainTransactionInput{}, []*DomainTransactionOutput{}, 0,
DomainSubnetworkID{}, 0, []byte{}, 0, 0,
DomainSubnetworkID{}, 0, DomainHash{}, []byte{}, 0, 0,
&DomainTransactionID{}}

// Equal returns whether tx equals to other
@@ -107,6 +109,10 @@ func (tx *DomainTransaction) Equal(other *DomainTransaction) bool {
return false
}

if !tx.PayloadHash.Equal(&other.PayloadHash) {
return false
}

if !bytes.Equal(tx.Payload, other.Payload) {
return false
}

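The `var _ = DomainTransaction{...}` declaration above, together with its comment, is a compile-time guard: a positional composite literal must supply every field, so adding a field (here PayloadHash) forces that line, and therefore Equal and Clone, to be revisited. The same pattern as a self-contained sketch on a made-up type; the `point` type exists only for illustration.

package example

// point stands in for any struct protected by this guard.
type point struct {
	X, Y int
	Tag  string
}

// A positional composite literal must supply every field in order, so adding or
// removing a field of point makes this declaration stop compiling, which is the
// cue to revisit any Equal/Clone-style helpers for the type.
var _ = point{0, 0, ""}
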
@@ -79,6 +79,10 @@ func initTestBaseTransaction() *externalapi.DomainTransaction {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -93,6 +97,34 @@ func initTestBaseTransaction() *externalapi.DomainTransaction {
func initTestTransactionToCompare() []*transactionToCompare {

testTx := []*transactionToCompare{{
tx: &externalapi.DomainTransaction{
1,
[]*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{
*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF},
[]byte{1, 2, 3},
uint64(0xFFFFFFFF),
utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}},
[]*externalapi.DomainTransactionOutput{{uint64(0xFFFF),
&externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}},
{uint64(0xFFFF),
&externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}},
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), //Changed
[]byte{0x01},
0,
1,
externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}),
},
expectedResult: false,
}, {
tx: &externalapi.DomainTransaction{
1,
[]*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{
@@ -107,6 +139,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -131,6 +167,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01, 0x02}, //Changed
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -155,6 +195,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01, 0x02}, //Changed
0,
1,
@@ -178,6 +222,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -204,6 +252,10 @@ func initTestTransactionToCompare() []*transactionToCompare {

externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -229,6 +281,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
1000000000, //Changed
1,
@@ -252,6 +308,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -275,6 +335,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
2, //Changed
@@ -298,6 +362,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
2, //Changed
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -326,6 +394,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -350,6 +422,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -373,6 +449,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -393,6 +473,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -418,6 +502,10 @@ func initTestTransactionToCompare() []*transactionToCompare {
1,
externalapi.DomainSubnetworkID{0x01},
2, // Changed
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
@@ -449,9 +537,13 @@ func initTestDomainTransactionForClone() []*externalapi.DomainTransaction {
LockTime: 1,
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
Gas: 1,
Payload: []byte{0x01},
Fee: 5555555555,
Mass: 1,
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
Payload: []byte{0x01},
Fee: 5555555555,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -463,10 +555,14 @@ func initTestDomainTransactionForClone() []*externalapi.DomainTransaction {
LockTime: 1,
SubnetworkID: externalapi.DomainSubnetworkID{0x01},
Gas: 1,
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{}),
PayloadHash: *externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}),
Payload: []byte{0x01},
Fee: 0,
Mass: 1,
ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{}),
},
}
return tests
@@ -482,6 +578,27 @@ func initTestDomainTransactionForEqual() []testDomainTransactionStruct {
{
baseTx: nil,
transactionToCompareTo: []*transactionToCompare{{
tx: &externalapi.DomainTransaction{
1,
[]*externalapi.DomainTransactionInput{},
[]*externalapi.DomainTransactionOutput{},
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
0,
1,
externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}),
},
expectedResult: false,
}, {
tx: nil,
expectedResult: true}},
}, {
@@ -492,6 +609,10 @@ func initTestDomainTransactionForEqual() []testDomainTransactionStruct {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
1,
1,
@@ -511,6 +632,10 @@ func initTestDomainTransactionForEqual() []testDomainTransactionStruct {
1,
externalapi.DomainSubnetworkID{0x01},
0,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
1,
1,
@@ -525,6 +650,10 @@ func initTestDomainTransactionForEqual() []testDomainTransactionStruct {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
1,
1,
@@ -539,6 +668,10 @@ func initTestDomainTransactionForEqual() []testDomainTransactionStruct {
1,
externalapi.DomainSubnetworkID{0x01},
1,
*externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}),
[]byte{0x01},
2, // Changed fee
1,

@@ -27,5 +27,5 @@ type MutableUTXODiff interface {
ToRemove() UTXOCollection

WithDiffInPlace(other UTXODiff) error
AddTransaction(transaction *DomainTransaction, blockDAAScore uint64) error
AddTransaction(transaction *DomainTransaction, blockBlueScore uint64) error
}

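In the hunk above, AddTransaction takes a DAA score on one side of the comparison and a blue score on the other, but the call shape is the same. A sketch that uses nothing beyond the interface methods listed above; how the diff instance is obtained is outside this hunk, and applyTransaction is an illustrative name.

// Sketch only, written against the MutableUTXODiff interface shown above.
package example

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

func applyTransaction(diff externalapi.MutableUTXODiff, tx *externalapi.DomainTransaction, score uint64) error {
	// score is a DAA score or a blue score depending on which side of this
	// comparison the interface comes from.
	return diff.AddTransaction(tx, score)
}
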
@@ -7,7 +7,7 @@ package externalapi
type UTXOEntry interface {
Amount() uint64
ScriptPublicKey() *ScriptPublicKey // The public key script for the output.
BlockDAAScore() uint64 // Blue score of the block accepting the tx.
BlockBlueScore() uint64 // Blue score of the block accepting the tx.
IsCoinbase() bool
Equal(other UTXOEntry) bool
}

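UTXOEntry above is a read-only view of an unspent output; the test code earlier in this diff constructs one with utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{...}, true, 2). A sketch of reading an entry back through the interface, using the accessor spelling from the BlockDAAScore side of the hunk; describeEntry is an illustrative name.

// Sketch only, written against the UTXOEntry interface shown above.
package example

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

func describeEntry(entry externalapi.UTXOEntry) (amount uint64, isCoinbase bool, score uint64) {
	return entry.Amount(), entry.IsCoinbase(), entry.BlockDAAScore()
}
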
@@ -5,8 +5,8 @@ import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
// AcceptanceDataStore represents a store of AcceptanceData
type AcceptanceDataStore interface {
Store
Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData)
IsStaged(stagingArea *StagingArea) bool
Get(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error)
Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash)
Stage(blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData)
IsStaged() bool
Get(dbContext DBReader, blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error)
Delete(blockHash *externalapi.DomainHash)
}

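On one side of this comparison every AcceptanceDataStore method is threaded through an explicit *StagingArea, on the other the store stages internally. A sketch of the staging-area flavored flow using only the signatures listed above; stageAndGet is an illustrative name, the model import path is assumed from the externalapi import shown in the hunk, and all arguments are assumed to be supplied by the caller.

// Sketch only: stage acceptance data for a block, then read it back through
// the same staging area before it is committed.
package example

import (
	"github.com/kaspanet/kaspad/domain/consensus/model"
	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)

func stageAndGet(store model.AcceptanceDataStore, dbContext model.DBReader,
	stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
	acceptanceData externalapi.AcceptanceData) (externalapi.AcceptanceData, error) {

	store.Stage(stagingArea, blockHash, acceptanceData)
	_ = store.IsStaged(stagingArea) // reports whether this store has staged data in the area
	return store.Get(dbContext, stagingArea, blockHash)
}
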
Some files were not shown because too many files have changed in this diff.