kaspad/blockdag/dagio.go
// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockdag
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"sync"
"github.com/daglabs/btcd/dagconfig/daghash"
"github.com/daglabs/btcd/database"
"github.com/daglabs/btcd/util"
"github.com/daglabs/btcd/wire"
)
const (
// blockHdrSize is the size of a block header. This is simply the
// constant from wire and is only provided here for convenience since
// wire.MaxBlockHeaderPayload is quite long.
blockHdrSize = wire.MaxBlockHeaderPayload
// latestUTXOSetBucketVersion is the current version of the UTXO set
// bucket that is used to track all unspent outputs.
latestUTXOSetBucketVersion = 1
)
var (
// blockIndexBucketName is the name of the db bucket used to house the
// block headers and contextual information.
blockIndexBucketName = []byte("blockheaderidx")
// hashIndexBucketName is the name of the db bucket used to house the
// block hash -> block height index.
hashIndexBucketName = []byte("hashidx")
// heightIndexBucketName is the name of the db bucket used to house
// the block height -> block hash index.
heightIndexBucketName = []byte("heightidx")
// dagStateKeyName is the name of the db key used to store the DAG
// state (tip hashes, last finality point, and last sub-network ID).
dagStateKeyName = []byte("dagstate")
// utxoSetVersionKeyName is the name of the db key used to store the
// version of the utxo set currently in the database.
utxoSetVersionKeyName = []byte("utxosetversion")
// utxoSetBucketName is the name of the db bucket used to house the
// unspent transaction output set.
utxoSetBucketName = []byte("utxoset")
// pendingSubNetworksBucketName is the name of the db bucket used to store the
// pending sub-networks.
pendingSubNetworksBucketName = []byte("pendingsubnetworks")
// registeredSubNetworkTxsBucketName is the name of the db bucket used to house
// the transactions that have been used to register sub-networks.
registeredSubNetworkTxsBucketName = []byte("registeredsubnetworktxs")
// subNetworksBucketName is the name of the db bucket used to store the
// sub-network registry.
subNetworksBucketName = []byte("subnetworks")
// byteOrder is the preferred byte order used for serializing numeric
// fields for storage in the database.
byteOrder = binary.LittleEndian
)
// errNotInDAG signifies that a block hash or height that is not in the
// DAG was requested.
type errNotInDAG string
// Error implements the error interface.
func (e errNotInDAG) Error() string {
return string(e)
}
// isNotInDAGErr returns whether or not the passed error is an
// errNotInDAG error.
func isNotInDAGErr(err error) bool {
_, ok := err.(errNotInDAG)
return ok
}
// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string
// Error implements the error interface.
func (e errDeserialize) Error() string {
return string(e)
}
// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
_, ok := err.(errDeserialize)
return ok
}
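// exampleClassifyError is an illustrative sketch (not part of the original
// file) of how callers in this file distinguish the two local error kinds:
// deserialization failures are surfaced as database corruption, while
// not-in-DAG lookups are ordinary, recoverable misses.
func exampleClassifyError(err error) string {
	switch {
	case isDeserializeErr(err):
		return "corrupt data"
	case isNotInDAGErr(err):
		return "unknown block or height"
	default:
		return "unexpected error"
	}
}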
// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
var serialized [4]byte
byteOrder.PutUint32(serialized[:], version)
return dbTx.Metadata().Put(key, serialized[:])
}
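// exampleFetchVersion is an illustrative sketch (not part of the original
// file) of the read-side counterpart to dbPutVersion. It assumes the
// metadata bucket's Get returns nil for missing keys, in which case a zero
// version is reported so callers can detect an untracked entity.
func exampleFetchVersion(dbTx database.Tx, key []byte) uint32 {
	serialized := dbTx.Metadata().Get(key)
	if serialized == nil {
		return 0
	}
	return byteOrder.Uint32(serialized)
}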
// -----------------------------------------------------------------------------
// The transaction spend journal consists of an entry for each block connected
// to the main chain which contains the transaction outputs the block spends
// serialized such that the order is the reverse of the order they were spent.
//
// This is required because reorganizing the chain necessarily entails
// disconnecting blocks to get back to the point of the fork which implies
// unspending all of the transaction outputs that each block previously spent.
// Since the UTXO set, by definition, only contains unspent transaction outputs,
// the spent transaction outputs must be resurrected from somewhere. There is
// more than one way this could be done, however this is the most
// straightforward method that does not require having a transaction index or
// an unpruned blockchain.
//
// NOTE: This format is NOT self describing. The additional details such as
// the number of entries (transaction inputs) are expected to come from the
// block itself and the UTXO set (for legacy entries). The rationale in doing
// this is to save space. This is also the reason the spent outputs are
// serialized in the reverse order they are spent because later transactions are
// allowed to spend outputs from earlier ones in the same block.
//
// The reserved field below used to keep track of the version of the containing
// transaction when the height in the header code was non-zero, however the
// height is always non-zero now, but keeping the extra reserved field allows
// backwards compatibility.
//
// The serialized format is:
//
// [<header code><reserved><compressed txout>],...
//
// Field Type Size
// header code VLQ variable
// reserved byte 1
// compressed txout
// compressed amount VLQ variable
// compressed script []byte variable
//
// The serialized header code format is:
// bit 0 - containing transaction is a coinbase
// bits 1-x - height of the block that contains the spent txout
//
// Example 1:
// From block 170 in main blockchain.
//
// 1300320511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c
// <><><------------------------------------------------------------------>
// | | |
// | reserved compressed txout
// header code
//
// - header code: 0x13 (coinbase, height 9)
// - reserved: 0x00
// - compressed txout 0:
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
// - 0x05: special script type pay-to-pubkey
// - 0x11...5c: x-coordinate of the pubkey
//
// Example 2:
// Adapted from block 100025 in main blockchain.
//
// 8b99700091f20f006edbc6c4d31bae9f1ccc38538a114bf42de65e868b99700086c64700b2fb57eadf61e106a100a7445a8c3f67898841ec
// <----><><----------------------------------------------><----><><---------------------------------------------->
// | | | | | |
// | reserved compressed txout | reserved compressed txout
// header code header code
//
// - Last spent output:
// - header code: 0x8b9970 (not coinbase, height 100024)
// - reserved: 0x00
// - compressed txout:
// - 0x91f20f: VLQ-encoded compressed amount for 34405000000 (344.05 BTC)
// - 0x00: special script type pay-to-pubkey-hash
// - 0x6e...86: pubkey hash
// - Second to last spent output:
// - header code: 0x8b9970 (not coinbase, height 100024)
// - reserved: 0x00
// - compressed txout:
// - 0x86c647: VLQ-encoded compressed amount for 13761000000 (137.61 BTC)
// - 0x00: special script type pay-to-pubkey-hash
// - 0xb2...ec: pubkey hash
// -----------------------------------------------------------------------------
// spentTxOut contains a spent transaction output and potentially additional
// contextual information such as whether or not it was contained in a coinbase
// transaction and which block height the containing transaction was included
// in. As described in the comments above, the additional contextual
// information will only be valid when this spent txout is spending the last
// unspent output of the containing transaction.
type spentTxOut struct {
amount int64 // The amount of the output.
pkScript []byte // The public key script for the output.
height int32 // Height of the block containing the creating tx.
isCoinBase bool // Whether creating tx is a coinbase.
}
// spentTxOutHeaderCode returns the calculated header code to be used when
// serializing the provided stxo entry.
func spentTxOutHeaderCode(stxo *spentTxOut) uint64 {
// As described in the serialization format comments, the header code
// encodes the height shifted over one bit and the coinbase flag in the
// lowest bit.
headerCode := uint64(stxo.height) << 1
if stxo.isCoinBase {
headerCode |= 0x01
}
return headerCode
}
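// exampleSpentTxOutHeaderCodeRoundTrip is an illustrative sketch (not
// consensus code) showing that the header code decodes back to the original
// height and coinbase flag, mirroring the decode logic in decodeSpentTxOut.
// The sample values match Example 1 in the format comment above: height 9
// with the coinbase flag set yields 9<<1 | 0x01 = 0x13.
func exampleSpentTxOutHeaderCodeRoundTrip() {
	stxo := &spentTxOut{height: 9, isCoinBase: true}
	code := spentTxOutHeaderCode(stxo)
	isCoinBase := code&0x01 != 0
	height := int32(code >> 1)
	fmt.Printf("code=%#x coinbase=%t height=%d\n", code, isCoinBase, height)
}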
// spentTxOutSerializeSize returns the number of bytes it would take to
// serialize the passed stxo according to the format described above.
func spentTxOutSerializeSize(stxo *spentTxOut) int {
size := serializeSizeVLQ(spentTxOutHeaderCode(stxo))
if stxo.height > 0 {
// The legacy v1 spend journal format conditionally tracked the
// containing transaction version when the height was non-zero,
// so this is required for backwards compat.
size += serializeSizeVLQ(0)
}
return size + compressedTxOutSize(uint64(stxo.amount), stxo.pkScript)
}
// putSpentTxOut serializes the passed stxo according to the format described
// above directly into the passed target byte slice. The target byte slice must
// be at least large enough to handle the number of bytes returned by the
// spentTxOutSerializeSize function or it will panic.
func putSpentTxOut(target []byte, stxo *spentTxOut) int {
headerCode := spentTxOutHeaderCode(stxo)
offset := putVLQ(target, headerCode)
if stxo.height > 0 {
// The legacy v1 spend journal format conditionally tracked the
// containing transaction version when the height was non-zero,
// so this is required for backwards compat.
offset += putVLQ(target[offset:], 0)
}
return offset + putCompressedTxOut(target[offset:], uint64(stxo.amount),
stxo.pkScript)
}
// decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed
// by other data, into the passed stxo struct. It returns the number of bytes
// read.
func decodeSpentTxOut(serialized []byte, stxo *spentTxOut) (int, error) {
// Ensure there are bytes to decode.
if len(serialized) == 0 {
return 0, errDeserialize("no serialized bytes")
}
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return offset, errDeserialize("unexpected end of data after " +
"header code")
}
// Decode the header code.
//
// Bit 0 indicates containing transaction is a coinbase.
// Bits 1-x encode height of containing transaction.
stxo.isCoinBase = code&0x01 != 0
stxo.height = int32(code >> 1)
if stxo.height > 0 {
// The legacy v1 spend journal format conditionally tracked the
// containing transaction version when the height was non-zero,
// so this is required for backwards compat.
_, bytesRead := deserializeVLQ(serialized[offset:])
offset += bytesRead
if offset >= len(serialized) {
return offset, errDeserialize("unexpected end of data " +
"after reserved")
}
}
// Decode the compressed txout.
amount, pkScript, bytesRead, err := decodeCompressedTxOut(
serialized[offset:])
offset += bytesRead
if err != nil {
return offset, errDeserialize(fmt.Sprintf("unable to decode "+
"txout: %v", err))
}
stxo.amount = int64(amount)
stxo.pkScript = pkScript
return offset, nil
}
// deserializeSpendJournalEntry decodes the passed serialized byte slice into a
// slice of spent txouts according to the format described in detail above.
//
// Since the serialization format is not self describing, as noted in the
// format comments, this function also requires the transactions that spend the
// txouts.
func deserializeSpendJournalEntry(serialized []byte, txs []*wire.MsgTx) ([]spentTxOut, error) {
// Calculate the total number of stxos.
var numStxos int
for _, tx := range txs {
numStxos += len(tx.TxIn)
}
// When a block has no spent txouts there is nothing to serialize.
if len(serialized) == 0 {
// Ensure the block actually has no stxos. This should never
// happen unless there is database corruption or an empty entry
// erroneously made its way into the database.
if numStxos != 0 {
return nil, AssertError(fmt.Sprintf("mismatched spend "+
"journal serialization - no serialization for "+
"expected %d stxos", numStxos))
}
return nil, nil
}
// Loop backwards through all transactions so everything is read in
// reverse order to match the serialization order.
stxoIdx := numStxos - 1
offset := 0
stxos := make([]spentTxOut, numStxos)
for txIdx := len(txs) - 1; txIdx > -1; txIdx-- {
tx := txs[txIdx]
// Loop backwards through all of the transaction inputs and read
// the associated stxo.
for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- {
txIn := tx.TxIn[txInIdx]
stxo := &stxos[stxoIdx]
stxoIdx--
n, err := decodeSpentTxOut(serialized[offset:], stxo)
offset += n
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable "+
"to decode stxo for %v: %v",
txIn.PreviousOutPoint, err))
}
}
}
return stxos, nil
}
// serializeSpendJournalEntry serializes all of the passed spent txouts into a
// single byte slice according to the format described in detail above.
func serializeSpendJournalEntry(stxos []spentTxOut) []byte {
if len(stxos) == 0 {
return nil
}
// Calculate the size needed to serialize the entire journal entry.
var size int
for i := range stxos {
size += spentTxOutSerializeSize(&stxos[i])
}
serialized := make([]byte, size)
// Serialize each individual stxo directly into the slice in reverse
// order one after the other.
var offset int
for i := len(stxos) - 1; i > -1; i-- {
offset += putSpentTxOut(serialized[offset:], &stxos[i])
}
return serialized
}
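// exampleSpendJournalRoundTrip is an illustrative sketch (not consensus code)
// of a spend journal round trip. Because the serialization is not
// self-describing, the deserializer must be handed the same transactions that
// spent the outputs; txs is assumed to be the block's transactions in block
// order, exactly as they were when the journal entry was created.
func exampleSpendJournalRoundTrip(stxos []spentTxOut, txs []*wire.MsgTx) ([]spentTxOut, error) {
	serialized := serializeSpendJournalEntry(stxos)
	return deserializeSpendJournalEntry(serialized, txs)
}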
// -----------------------------------------------------------------------------
// The unspent transaction output (UTXO) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of UTXOs when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
// <hash><output index>
//
// Field Type Size
// hash daghash.Hash daghash.HashSize
// output index VLQ variable
//
// The serialized value format is:
//
// <header code><compressed txout>
//
// Field Type Size
// header code VLQ variable
// compressed txout
// compressed amount VLQ variable
// compressed script []byte variable
//
// The serialized header code format is:
// bit 0 - containing transaction is a coinbase
// bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, b7c3332bc138e2c9429818f5fed500bcc1746544218772389054dc8047d7cd3f:0
//
// 03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
// <><------------------------------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x03 (coinbase, height 1)
// - compressed txout:
// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
// - 0x04: special script type pay-to-pubkey
// - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
// 8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
// <----><------------------------------------------>
// | |
// header code compressed txout
//
// - header code: 0x8cf316 (not coinbase, height 113931)
// - compressed txout:
// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
// - 0x00: special script type pay-to-pubkey-hash
// - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
// a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
// <----><-------------------------------------------------->
// | |
// header code compressed txout
//
// - header code: 0xa8a258 (not coinbase, height 338156)
// - compressed txout:
// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
// - 0x01: special script type pay-to-script-hash
// - 0x1d...e6: script hash
// -----------------------------------------------------------------------------
// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)
// outpointKeyPool defines a concurrent safe free list of byte slices used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
New: func() interface{} {
b := make([]byte, daghash.HashSize+maxUint32VLQSerializeSize)
return &b // Pointer to slice to avoid boxing alloc.
},
}
// outpointKey returns a key suitable for use as a database key in the UTXO set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.OutPoint) *[]byte {
// A VLQ employs an MSB encoding, so they are useful not only to reduce
// the amount of storage space, but also so iteration of UTXOs when
// doing byte-wise comparisons will produce them in order.
key := outpointKeyPool.Get().(*[]byte)
idx := uint64(outpoint.Index)
*key = (*key)[:daghash.HashSize+serializeSizeVLQ(idx)]
copy(*key, outpoint.Hash[:])
putVLQ((*key)[daghash.HashSize:], idx)
return key
}
// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
outpointKeyPool.Put(key)
}
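// exampleFetchUTXOEntry is an illustrative sketch (not part of the original
// file) of the intended outpointKey lifecycle: the key buffer may be recycled
// after a read, but must not be recycled once it has been handed to Put, per
// the database interface contract noted in dbPutUTXODiff below.
func exampleFetchUTXOEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UTXOEntry, error) {
	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
	key := outpointKey(outpoint)
	serialized := utxoBucket.Get(*key)
	recycleOutpointKey(key)
	if serialized == nil {
		// The outpoint has no entry in the UTXO set.
		return nil, nil
	}
	return deserializeUTXOEntry(serialized)
}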
// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UTXOEntry) uint64 {
// As described in the serialization format comments, the header code
// encodes the height shifted over one bit and the coinbase flag in the
// lowest bit.
headerCode := uint64(entry.BlockHeight()) << 1
if entry.IsCoinBase() {
headerCode |= 0x01
}
return headerCode
}
// serializeUTXOEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUTXOEntry(entry *UTXOEntry) ([]byte, error) {
// Encode the header code.
headerCode := utxoEntryHeaderCode(entry)
// Calculate the size needed to serialize the entry.
size := serializeSizeVLQ(headerCode) +
compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())
// Serialize the header code followed by the compressed unspent
// transaction output.
serialized := make([]byte, size)
offset := putVLQ(serialized, headerCode)
offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
entry.PkScript())
return serialized, nil
}
// deserializeOutPoint decodes an outPoint from the passed serialized byte
// slice into a new wire.OutPoint using a format that is suitable for long-
// term storage. This format is described in detail above.
func deserializeOutPoint(serialized []byte) (*wire.OutPoint, error) {
if len(serialized) <= daghash.HashSize {
return nil, errDeserialize("unexpected end of data")
}
hash := daghash.Hash{}
hash.SetBytes(serialized[:daghash.HashSize])
index, _ := deserializeVLQ(serialized[daghash.HashSize:])
return wire.NewOutPoint(&hash, uint32(index)), nil
}
// deserializeUTXOEntry decodes a UTXO entry from the passed serialized byte
// slice into a new UTXOEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUTXOEntry(serialized []byte) (*UTXOEntry, error) {
// Deserialize the header code.
code, offset := deserializeVLQ(serialized)
if offset >= len(serialized) {
return nil, errDeserialize("unexpected end of data after header")
}
// Decode the header code.
//
// Bit 0 indicates whether the containing transaction is a coinbase.
// Bits 1-x encode height of containing transaction.
isCoinBase := code&0x01 != 0
blockHeight := int32(code >> 1)
// Decode the compressed unspent transaction output.
amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
if err != nil {
return nil, errDeserialize(fmt.Sprintf("unable to decode "+
"UTXO: %v", err))
}
entry := &UTXOEntry{
amount: amount,
pkScript: pkScript,
blockHeight: blockHeight,
packedFlags: 0,
}
if isCoinBase {
entry.packedFlags |= tfCoinBase
}
return entry, nil
}
// dbPutUTXODiff uses an existing database transaction to update the UTXO set
// in the database based on the provided UTXO diff. In particular, entries in
// the diff's toRemove collection are deleted from the database and entries in
// its toAdd collection are written to it.
func dbPutUTXODiff(dbTx database.Tx, diff *UTXODiff) error {
utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
for outPoint := range diff.toRemove {
key := outpointKey(outPoint)
err := utxoBucket.Delete(*key)
recycleOutpointKey(key)
if err != nil {
return err
}
}
for outPoint, entry := range diff.toAdd {
// Serialize and store the UTXO entry.
serialized, err := serializeUTXOEntry(entry)
if err != nil {
return err
}
key := outpointKey(outPoint)
err = utxoBucket.Put(*key, serialized)
// NOTE: The key is intentionally not recycled here since the
// database interface contract prohibits modifications. It will
// be garbage collected normally when the database is done with
// it.
if err != nil {
return err
}
}
return nil
}
// -----------------------------------------------------------------------------
// The block index consists of two buckets with an entry for every block in the
// DAG. One bucket is for the hash to height mapping and the other is
// for the height to hash mapping.
//
// The serialized format for values in the hash to height bucket is:
// <height>
//
// Field Type Size
// height uint32 4 bytes
//
// The serialized format for values in the height to hash bucket is:
// <hash>
//
// Field Type Size
// hash daghash.Hash daghash.HashSize
// -----------------------------------------------------------------------------
// dbPutBlockIndex uses an existing database transaction to update or add the
// block index entries for the hash to height and height to hash mappings for
// the provided values.
func dbPutBlockIndex(dbTx database.Tx, hash *daghash.Hash, height int32) error {
// Serialize the height for use in the index entries.
var serializedHeight [4]byte
byteOrder.PutUint32(serializedHeight[:], uint32(height))
// Add the block hash to height mapping to the index.
meta := dbTx.Metadata()
hashIndex := meta.Bucket(hashIndexBucketName)
if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
return err
}
// Add the block height to hash mapping to the index.
heightIndex := meta.Bucket(heightIndexBucketName)
return heightIndex.Put(serializedHeight[:], hash[:])
}
// dbFetchHeightByHash uses an existing database transaction to retrieve the
// height for the provided hash from the index.
func dbFetchHeightByHash(dbTx database.Tx, hash *daghash.Hash) (int32, error) {
meta := dbTx.Metadata()
hashIndex := meta.Bucket(hashIndexBucketName)
serializedHeight := hashIndex.Get(hash[:])
if serializedHeight == nil {
str := fmt.Sprintf("block %s is not in the DAG", hash)
return 0, errNotInDAG(str)
}
return int32(byteOrder.Uint32(serializedHeight)), nil
}
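// exampleFetchHashByHeight is an illustrative sketch (not part of the
// original file) of the reverse lookup through the height index written by
// dbPutBlockIndex. Note that in a DAG more than one block may share a height;
// the height index keeps only the most recently written hash for each height,
// since dbPutBlockIndex overwrites the bucket entry.
func exampleFetchHashByHeight(dbTx database.Tx, height int32) (*daghash.Hash, error) {
	var serializedHeight [4]byte
	byteOrder.PutUint32(serializedHeight[:], uint32(height))
	heightIndex := dbTx.Metadata().Bucket(heightIndexBucketName)
	hashBytes := heightIndex.Get(serializedHeight[:])
	if hashBytes == nil {
		return nil, errNotInDAG(fmt.Sprintf("no block at height %d", height))
	}
	var hash daghash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}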
type dagState struct {
TipHashes []daghash.Hash
LastFinalityPoint daghash.Hash
LastSubNetworkID uint64
}
// serializeDAGState returns the serialization of the DAG state.
// This is data to be stored in the DAG state bucket.
func serializeDAGState(state *dagState) ([]byte, error) {
return json.Marshal(state)
}
// deserializeDAGState deserializes the passed serialized DAG state.
// This is data stored in the DAG state bucket and is updated after
// every block is connected to the DAG.
func deserializeDAGState(serializedData []byte) (*dagState, error) {
var state *dagState
err := json.Unmarshal(serializedData, &state)
if err != nil {
return nil, database.Error{
ErrorCode: database.ErrCorruption,
Description: "corrupt DAG state",
}
}
return state, nil
}
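// exampleDAGStateRoundTrip is an illustrative sketch (not consensus code)
// showing that serializeDAGState and deserializeDAGState are inverses. Note
// that daghash.Hash is a byte array, so json.Marshal encodes each hash as a
// JSON array of numbers rather than a hex string.
func exampleDAGStateRoundTrip(state *dagState) (*dagState, error) {
	serialized, err := serializeDAGState(state)
	if err != nil {
		return nil, err
	}
	return deserializeDAGState(serialized)
}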
// dbPutDAGState uses an existing database transaction to store the latest
// DAG state (tip hashes, last finality point, and last sub-network ID).
func dbPutDAGState(dbTx database.Tx, state *dagState) error {
serializedData, err := serializeDAGState(state)
if err != nil {
return err
}
return dbTx.Metadata().Put(dagStateKeyName, serializedData)
}
// createDAGState initializes both the database and the DAG state to the
// genesis block. This includes creating the necessary buckets and inserting
// the genesis block, so it must only be called on an uninitialized database.
func (dag *BlockDAG) createDAGState() error {
// Create a new node from the genesis block and set it as the sole tip of the DAG.
genesisBlock := util.NewBlock(dag.dagParams.GenesisBlock)
genesisBlock.SetHeight(0)
header := &genesisBlock.MsgBlock().Header
node := newBlockNode(header, newSet(), dag.dagParams.K)
node.status = statusDataStored | statusValid
genesisCoinbase := genesisBlock.Transactions()[0].MsgTx()
genesisCoinbaseTxIn := genesisCoinbase.TxIn[0]
genesisCoinbaseTxOut := genesisCoinbase.TxOut[0]
genesisCoinbaseOutpoint := *wire.NewOutPoint(&genesisCoinbaseTxIn.PreviousOutPoint.Hash, genesisCoinbaseTxIn.PreviousOutPoint.Index)
genesisCoinbaseUTXOEntry := NewUTXOEntry(genesisCoinbaseTxOut, true, 0)
node.diff = &UTXODiff{
toAdd: utxoCollection{genesisCoinbaseOutpoint: genesisCoinbaseUTXOEntry},
toRemove: utxoCollection{},
}
dag.virtual.utxoSet.AddTx(genesisCoinbase, 0)
dag.virtual.SetTips(setFromSlice(node))
// Add the new node to the index which is used for faster lookups.
dag.index.addNode(node)
// Initialize the last finality point to the genesis block
dag.lastFinalityPoint = node
// Create the initial database state for the DAG, including creating
// the necessary index buckets and inserting the genesis block.
err := dag.db.Update(func(dbTx database.Tx) error {
meta := dbTx.Metadata()
// Create the bucket that houses the block index data.
_, err := meta.CreateBucket(blockIndexBucketName)
if err != nil {
return err
}
// Create the bucket that houses the chain block hash to height
// index.
_, err = meta.CreateBucket(hashIndexBucketName)
if err != nil {
return err
}
// Create the bucket that houses the chain block height to hash
// index.
_, err = meta.CreateBucket(heightIndexBucketName)
if err != nil {
return err
}
// Create the bucket that houses the utxo set and store its
// version. Note that the genesis block coinbase transaction is
// intentionally not inserted here since it is not spendable by
// consensus rules.
_, err = meta.CreateBucket(utxoSetBucketName)
if err != nil {
return err
}
err = dbPutVersion(dbTx, utxoSetVersionKeyName,
latestUTXOSetBucketVersion)
if err != nil {
return err
}
// Create the bucket that houses the pending sub-networks.
_, err = meta.CreateBucket(pendingSubNetworksBucketName)
if err != nil {
return err
}
// Create the bucket that houses the registered sub-networks to
// their registry transactions index.
_, err = meta.CreateBucket(registeredSubNetworkTxsBucketName)
if err != nil {
return err
}
// Create the bucket that houses the registered sub-networks.
_, err = meta.CreateBucket(subNetworksBucketName)
if err != nil {
return err
}
// Save the genesis block to the block index database.
err = dbStoreBlockNode(dbTx, node)
if err != nil {
return err
}
// Add the genesis block hash to height and height to hash
// mappings to the index.
err = dbPutBlockIndex(dbTx, &node.hash, node.height)
if err != nil {
return err
}
// Store the current DAG state into the database.
state := &dagState{
TipHashes: dag.TipHashes(),
LastFinalityPoint: *genesisBlock.Hash(),
}
err = dbPutDAGState(dbTx, state)
if err != nil {
return err
}
// Store the genesis block into the database.
return dbStoreBlock(dbTx, genesisBlock)
})
return err
}
// initDAGState attempts to load and initialize the DAG state from the
// database. When the db does not yet contain any DAG state, both it and the
// DAG state are initialized to the genesis block.
func (dag *BlockDAG) initDAGState() error {
// Determine the state of the DAG database. We may need to initialize
// everything from scratch or upgrade certain buckets.
var initialized bool
err := dag.db.View(func(dbTx database.Tx) error {
initialized = dbTx.Metadata().Get(dagStateKeyName) != nil
return nil
})
if err != nil {
return err
}
if !initialized {
// At this point the database has not already been initialized, so
// initialize both it and the DAG state to the genesis block.
return dag.createDAGState()
}
// Attempt to load the DAG state from the database.
return dag.db.View(func(dbTx database.Tx) error {
// Fetch the stored DAG state from the database metadata. It is
// guaranteed to exist at this point since the database was just
// confirmed to be initialized above.
serializedData := dbTx.Metadata().Get(dagStateKeyName)
log.Tracef("Serialized DAG state: %x", serializedData)
state, err := deserializeDAGState(serializedData)
if err != nil {
return err
}
// Load all of the headers from the data for the known DAG
// and construct the block index accordingly. Since the
// number of nodes is already known, perform a single alloc
// for them versus a whole bunch of little ones to reduce
// pressure on the GC.
log.Infof("Loading block index...")
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
// Determine how many blocks will be loaded into the index so we can
// allocate the right amount.
var blockCount int32
cursor := blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
blockCount++
}
blockNodes := make([]blockNode, blockCount)
var i int32
var lastNode *blockNode
cursor = blockIndexBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
header, status, err := deserializeBlockRow(cursor.Value())
if err != nil {
return err
}
parents := newSet()
if lastNode == nil {
blockHash := header.BlockHash()
if !blockHash.IsEqual(dag.dagParams.GenesisHash) {
return AssertError(fmt.Sprintf("initDAGState: Expected "+
"first entry in block index to be genesis block, "+
"found %s", blockHash))
}
} else {
for _, hash := range header.ParentHashes {
parent := dag.index.LookupNode(&hash)
if parent == nil {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find parent %s for block %s", hash, header.BlockHash()))
}
parents.add(parent)
}
if len(parents) == 0 {
return AssertError(fmt.Sprintf("initDAGState: Could "+
"not find any parent for block %s", header.BlockHash()))
}
}
// Initialize the block node for the block, connect it,
// and add it to the block index.
node := &blockNodes[i]
initBlockNode(node, header, parents, dag.dagParams.K)
node.status = status
dag.index.addNode(node)
if blockStatus(status).KnownValid() {
dag.blockCount++
}
lastNode = node
i++
}
// Load all of the known UTXO entries and construct the full
// UTXO set accordingly. Since the number of entries is already
// known, perform a single alloc for them versus a whole bunch
// of little ones to reduce pressure on the GC.
log.Infof("Loading UTXO set...")
utxoEntryBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
// Determine how many UTXO entries will be loaded so we can allocate
// the right amount.
var utxoEntryCount int32
cursor = utxoEntryBucket.Cursor()
for ok := cursor.First(); ok; ok = cursor.Next() {
utxoEntryCount++
}
fullUTXOCollection := make(utxoCollection, utxoEntryCount)
for ok := cursor.First(); ok; ok = cursor.Next() {
// Deserialize the outPoint
outPoint, err := deserializeOutPoint(cursor.Key())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt outPoint: %v", err),
}
}
return err
}
// Deserialize the utxo entry
entry, err := deserializeUTXOEntry(cursor.Value())
if err != nil {
// Ensure any deserialization errors are returned as database
// corruption errors.
if isDeserializeErr(err) {
return database.Error{
ErrorCode: database.ErrCorruption,
Description: fmt.Sprintf("corrupt utxo entry: %v", err),
}
}
return err
}
fullUTXOCollection[*outPoint] = entry
}
// Apply the loaded utxoCollection to the virtual block.
dag.virtual.utxoSet.utxoCollection = fullUTXOCollection
// Apply the stored tips to the virtual block.
tips := newSet()
for _, tipHash := range state.TipHashes {
tip := dag.index.LookupNode(&tipHash)
if tip == nil {
return AssertError(fmt.Sprintf("initDAGState: cannot find "+
"DAG tip %s in block index", tipHash))
}
tips.add(tip)
}
dag.virtual.SetTips(tips)
// Set the last finality point
dag.lastFinalityPoint = dag.index.LookupNode(&state.LastFinalityPoint)
// Set the last sub-network ID
dag.lastSubNetworkID = state.LastSubNetworkID
return nil
})
}
// deserializeBlockRow parses a value in the block index bucket into a block
// header and block status bitfield.
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
buffer := bytes.NewReader(blockRow)
var header wire.BlockHeader
err := header.Deserialize(buffer)
if err != nil {
return nil, statusNone, err
}
statusByte, err := buffer.ReadByte()
if err != nil {
return nil, statusNone, err
}
return &header, blockStatus(statusByte), nil
}
// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a util.Block
// with the height set.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*util.Block, error) {
// Load the raw block bytes from the database.
blockBytes, err := dbTx.FetchBlock(&node.hash)
if err != nil {
return nil, err
}
// Create the encapsulated block and set the height appropriately.
block, err := util.NewBlockFromBytes(blockBytes)
if err != nil {
return nil, err
}
block.SetHeight(node.height)
return block, nil
}
// dbStoreBlockNode stores the block header and validation status to the block
// index bucket. This overwrites the current entry if one exists.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
// Serialize block data to be stored.
w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
header := node.Header()
err := header.Serialize(w)
if err != nil {
return err
}
err = w.WriteByte(byte(node.status))
if err != nil {
return err
}
value := w.Bytes()
// Write block header data to block index bucket.
blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
key := blockIndexKey(&node.hash, uint32(node.height))
return blockIndexBucket.Put(key, value)
}
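// exampleBlockRowRoundTrip is an illustrative sketch (not consensus code)
// showing that the value written by dbStoreBlockNode, a serialized header
// followed by a single status byte, is exactly what deserializeBlockRow
// parses back out.
func exampleBlockRowRoundTrip(header *wire.BlockHeader, status blockStatus) (*wire.BlockHeader, blockStatus, error) {
	w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
	if err := header.Serialize(w); err != nil {
		return nil, statusNone, err
	}
	if err := w.WriteByte(byte(status)); err != nil {
		return nil, statusNone, err
	}
	return deserializeBlockRow(w.Bytes())
}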
// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *util.Block) error {
hasBlock, err := dbTx.HasBlock(block.Hash())
if err != nil {
return err
}
if hasBlock {
return nil
}
return dbTx.StoreBlock(block)
}
// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *daghash.Hash, blockHeight uint32) []byte {
indexKey := make([]byte, daghash.HashSize+4)
binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
copy(indexKey[4:daghash.HashSize+4], blockHash[:])
return indexKey
}
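// exampleBlockIndexKeyOrder is an illustrative sketch (not consensus code)
// showing why the big-endian height prefix matters: byte-wise comparison of
// index keys orders entries by height, so the cursor scan in initDAGState
// sees lower blocks, and therefore every parent, before their children. The
// hashes here are hypothetical zero values.
func exampleBlockIndexKeyOrder() bool {
	var hashA, hashB daghash.Hash
	keyLow := blockIndexKey(&hashA, 1)
	keyHigh := blockIndexKey(&hashB, 2)
	// Lower heights sort first under byte-wise comparison.
	return bytes.Compare(keyLow, keyHigh) < 0
}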
// BlockByHash returns the block from the DAG with the given hash with
// the appropriate height set.
//
// This function is safe for concurrent access.
func (dag *BlockDAG) BlockByHash(hash *daghash.Hash) (*util.Block, error) {
// Lookup the block hash in the block index and ensure it is in
// the DAG.
node := dag.index.LookupNode(hash)
if node == nil {
str := fmt.Sprintf("block %s is not in the DAG", hash)
return nil, errNotInDAG(str)
}
// Load the block from the database and return it.
var block *util.Block
err := dag.db.View(func(dbTx database.Tx) error {
var err error
block, err = dbFetchBlockByNode(dbTx, node)
return err
})
return block, err
}