Remove unused utils and structures (#1526)

* Remove unused utils

* Remove unneeded randomness from tests

* Remove more unused functions

* Remove unused protobuf structures

* Fix small errors
Elichai Turkel 2021-02-14 18:13:20 +02:00 committed by GitHub
parent 7b4b5668e2
commit a581dea127
30 changed files with 85 additions and 1434 deletions


@ -11,15 +11,11 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/util/mstime"
"github.com/kaspanet/kaspad/util/random"
)
// TestBlockHeader tests the MsgBlockHeader API.
func TestBlockHeader(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
nonce := uint64(0xba4d87a69924a93d)
hashes := []*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}


@ -6,17 +6,12 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPing tests the MsgPing API against the latest protocol version.
func TestPing(t *testing.T) {
// Ensure we get the same nonce back out.
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: Error generating nonce: %v", err)
}
nonce := uint64(0x61c2c5535902862)
msg := NewMsgPing(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPing: wrong nonce - got %v, want %v",


@ -6,16 +6,11 @@ package appmessage
import (
"testing"
"github.com/kaspanet/kaspad/util/random"
)
// TestPongLatest tests the MsgPong API against the latest protocol version.
func TestPongLatest(t *testing.T) {
nonce, err := random.Uint64()
if err != nil {
t.Errorf("random.Uint64: error generating nonce: %v", err)
}
nonce := uint64(0x1a05b581a5182c)
msg := NewMsgPong(nonce)
if msg.Nonce != nonce {
t.Errorf("NewMsgPong: wrong nonce - got %v, want %v",


@ -21,7 +21,6 @@ import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/kaspanet/kaspad/util/pointers"
)
// BuildBlockVerboseData builds a BlockVerboseData from the given blockHeader.
@ -179,7 +178,7 @@ func (ctx *Context) buildTransactionVerboseOutputs(tx *externalapi.DomainTransac
passesFilter := len(filterAddrMap) == 0
var encodedAddr string
if addr != nil {
encodedAddr = *pointers.String(addr.EncodeAddress())
encodedAddr = addr.EncodeAddress()
// If the filter doesn't already pass, make it pass if
// the address exists in the filter.


@ -1473,100 +1473,6 @@ func (x *DbUtxoDiff) GetToRemove() []*DbUtxoCollectionItem {
return nil
}
type DbPruningPointUTXOSetBytes struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"`
}
func (x *DbPruningPointUTXOSetBytes) Reset() {
*x = DbPruningPointUTXOSetBytes{}
if protoimpl.UnsafeEnabled {
mi := &file_dbobjects_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DbPruningPointUTXOSetBytes) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DbPruningPointUTXOSetBytes) ProtoMessage() {}
func (x *DbPruningPointUTXOSetBytes) ProtoReflect() protoreflect.Message {
mi := &file_dbobjects_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DbPruningPointUTXOSetBytes.ProtoReflect.Descriptor instead.
func (*DbPruningPointUTXOSetBytes) Descriptor() ([]byte, []int) {
return file_dbobjects_proto_rawDescGZIP(), []int{24}
}
func (x *DbPruningPointUTXOSetBytes) GetBytes() []byte {
if x != nil {
return x.Bytes
}
return nil
}
type DbHeaderTips struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Tips []*DbHash `protobuf:"bytes,1,rep,name=tips,proto3" json:"tips,omitempty"`
}
func (x *DbHeaderTips) Reset() {
*x = DbHeaderTips{}
if protoimpl.UnsafeEnabled {
mi := &file_dbobjects_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DbHeaderTips) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DbHeaderTips) ProtoMessage() {}
func (x *DbHeaderTips) ProtoReflect() protoreflect.Message {
mi := &file_dbobjects_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DbHeaderTips.ProtoReflect.Descriptor instead.
func (*DbHeaderTips) Descriptor() ([]byte, []int) {
return file_dbobjects_proto_rawDescGZIP(), []int{25}
}
func (x *DbHeaderTips) GetTips() []*DbHash {
if x != nil {
return x.Tips
}
return nil
}
type DbTips struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -1578,7 +1484,7 @@ type DbTips struct {
func (x *DbTips) Reset() {
*x = DbTips{}
if protoimpl.UnsafeEnabled {
mi := &file_dbobjects_proto_msgTypes[26]
mi := &file_dbobjects_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1591,7 +1497,7 @@ func (x *DbTips) String() string {
func (*DbTips) ProtoMessage() {}
func (x *DbTips) ProtoReflect() protoreflect.Message {
mi := &file_dbobjects_proto_msgTypes[26]
mi := &file_dbobjects_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1604,7 +1510,7 @@ func (x *DbTips) ProtoReflect() protoreflect.Message {
// Deprecated: Use DbTips.ProtoReflect.Descriptor instead.
func (*DbTips) Descriptor() ([]byte, []int) {
return file_dbobjects_proto_rawDescGZIP(), []int{26}
return file_dbobjects_proto_rawDescGZIP(), []int{24}
}
func (x *DbTips) GetTips() []*DbHash {
@ -1625,7 +1531,7 @@ type DbVirtualDiffParents struct {
func (x *DbVirtualDiffParents) Reset() {
*x = DbVirtualDiffParents{}
if protoimpl.UnsafeEnabled {
mi := &file_dbobjects_proto_msgTypes[27]
mi := &file_dbobjects_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1638,7 +1544,7 @@ func (x *DbVirtualDiffParents) String() string {
func (*DbVirtualDiffParents) ProtoMessage() {}
func (x *DbVirtualDiffParents) ProtoReflect() protoreflect.Message {
mi := &file_dbobjects_proto_msgTypes[27]
mi := &file_dbobjects_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1651,7 +1557,7 @@ func (x *DbVirtualDiffParents) ProtoReflect() protoreflect.Message {
// Deprecated: Use DbVirtualDiffParents.ProtoReflect.Descriptor instead.
func (*DbVirtualDiffParents) Descriptor() ([]byte, []int) {
return file_dbobjects_proto_rawDescGZIP(), []int{27}
return file_dbobjects_proto_rawDescGZIP(), []int{25}
}
func (x *DbVirtualDiffParents) GetVirtualDiffParents() []*DbHash {
@ -1672,7 +1578,7 @@ type DbBlockCount struct {
func (x *DbBlockCount) Reset() {
*x = DbBlockCount{}
if protoimpl.UnsafeEnabled {
mi := &file_dbobjects_proto_msgTypes[28]
mi := &file_dbobjects_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1685,7 +1591,7 @@ func (x *DbBlockCount) String() string {
func (*DbBlockCount) ProtoMessage() {}
func (x *DbBlockCount) ProtoReflect() protoreflect.Message {
mi := &file_dbobjects_proto_msgTypes[28]
mi := &file_dbobjects_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1698,7 +1604,7 @@ func (x *DbBlockCount) ProtoReflect() protoreflect.Message {
// Deprecated: Use DbBlockCount.ProtoReflect.Descriptor instead.
func (*DbBlockCount) Descriptor() ([]byte, []int) {
return file_dbobjects_proto_rawDescGZIP(), []int{28}
return file_dbobjects_proto_rawDescGZIP(), []int{26}
}
func (x *DbBlockCount) GetCount() uint64 {
@ -1719,7 +1625,7 @@ type DbBlockHeaderCount struct {
func (x *DbBlockHeaderCount) Reset() {
*x = DbBlockHeaderCount{}
if protoimpl.UnsafeEnabled {
mi := &file_dbobjects_proto_msgTypes[29]
mi := &file_dbobjects_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1732,7 +1638,7 @@ func (x *DbBlockHeaderCount) String() string {
func (*DbBlockHeaderCount) ProtoMessage() {}
func (x *DbBlockHeaderCount) ProtoReflect() protoreflect.Message {
mi := &file_dbobjects_proto_msgTypes[29]
mi := &file_dbobjects_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1745,7 +1651,7 @@ func (x *DbBlockHeaderCount) ProtoReflect() protoreflect.Message {
// Deprecated: Use DbBlockHeaderCount.ProtoReflect.Descriptor instead.
func (*DbBlockHeaderCount) Descriptor() ([]byte, []int) {
return file_dbobjects_proto_rawDescGZIP(), []int{29}
return file_dbobjects_proto_rawDescGZIP(), []int{27}
}
func (x *DbBlockHeaderCount) GetCount() uint64 {
@ -1981,32 +1887,25 @@ var file_dbobjects_proto_rawDesc = []byte{
0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55,
0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65,
0x6d, 0x52, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x44,
0x62, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58,
0x4f, 0x53, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74,
0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22,
0x39, 0x0a, 0x0c, 0x44, 0x62, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x69, 0x70, 0x73, 0x12,
0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62,
0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22, 0x33, 0x0a, 0x06, 0x44, 0x62,
0x54, 0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73, 0x22,
0x5d, 0x0a, 0x14, 0x44, 0x62, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66,
0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x12, 0x76, 0x69, 0x72, 0x74, 0x75,
0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
0x6d, 0x52, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x33, 0x0a, 0x06, 0x44,
0x62, 0x54, 0x69, 0x70, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12, 0x76, 0x69, 0x72, 0x74,
0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x24,
0x0a, 0x0c, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14,
0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b,
0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f, 0x73,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 0x70, 0x73,
0x22, 0x5d, 0x0a, 0x14, 0x44, 0x62, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66,
0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x12, 0x76, 0x69, 0x72, 0x74,
0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12, 0x76, 0x69, 0x72,
0x74, 0x75, 0x61, 0x6c, 0x44, 0x69, 0x66, 0x66, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22,
0x24, 0x0a, 0x0c, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e,
0x74, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x6b, 0x61, 0x73, 0x70, 0x61, 0x6e, 0x65, 0x74, 0x2f, 0x6b, 0x61, 0x73, 0x70, 0x61, 0x64, 0x2f,
0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -2021,7 +1920,7 @@ func file_dbobjects_proto_rawDescGZIP() []byte {
return file_dbobjects_proto_rawDescData
}
var file_dbobjects_proto_msgTypes = make([]protoimpl.MessageInfo, 30)
var file_dbobjects_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
var file_dbobjects_proto_goTypes = []interface{}{
(*DbBlock)(nil), // 0: serialization.DbBlock
(*DbBlockHeader)(nil), // 1: serialization.DbBlockHeader
@ -2047,12 +1946,10 @@ var file_dbobjects_proto_goTypes = []interface{}{
(*DbReachabilityData)(nil), // 21: serialization.DbReachabilityData
(*DbReachabilityInterval)(nil), // 22: serialization.DbReachabilityInterval
(*DbUtxoDiff)(nil), // 23: serialization.DbUtxoDiff
(*DbPruningPointUTXOSetBytes)(nil), // 24: serialization.DbPruningPointUTXOSetBytes
(*DbHeaderTips)(nil), // 25: serialization.DbHeaderTips
(*DbTips)(nil), // 26: serialization.DbTips
(*DbVirtualDiffParents)(nil), // 27: serialization.DbVirtualDiffParents
(*DbBlockCount)(nil), // 28: serialization.DbBlockCount
(*DbBlockHeaderCount)(nil), // 29: serialization.DbBlockHeaderCount
(*DbTips)(nil), // 24: serialization.DbTips
(*DbVirtualDiffParents)(nil), // 25: serialization.DbVirtualDiffParents
(*DbBlockCount)(nil), // 26: serialization.DbBlockCount
(*DbBlockHeaderCount)(nil), // 27: serialization.DbBlockHeaderCount
}
var file_dbobjects_proto_depIdxs = []int32{
1, // 0: serialization.DbBlock.header:type_name -> serialization.DbBlockHeader
@ -2090,14 +1987,13 @@ var file_dbobjects_proto_depIdxs = []int32{
2, // 32: serialization.DbReachabilityData.futureCoveringSet:type_name -> serialization.DbHash
18, // 33: serialization.DbUtxoDiff.toAdd:type_name -> serialization.DbUtxoCollectionItem
18, // 34: serialization.DbUtxoDiff.toRemove:type_name -> serialization.DbUtxoCollectionItem
2, // 35: serialization.DbHeaderTips.tips:type_name -> serialization.DbHash
2, // 36: serialization.DbTips.tips:type_name -> serialization.DbHash
2, // 37: serialization.DbVirtualDiffParents.virtualDiffParents:type_name -> serialization.DbHash
38, // [38:38] is the sub-list for method output_type
38, // [38:38] is the sub-list for method input_type
38, // [38:38] is the sub-list for extension type_name
38, // [38:38] is the sub-list for extension extendee
0, // [0:38] is the sub-list for field type_name
2, // 35: serialization.DbTips.tips:type_name -> serialization.DbHash
2, // 36: serialization.DbVirtualDiffParents.virtualDiffParents:type_name -> serialization.DbHash
37, // [37:37] is the sub-list for method output_type
37, // [37:37] is the sub-list for method input_type
37, // [37:37] is the sub-list for extension type_name
37, // [37:37] is the sub-list for extension extendee
0, // [0:37] is the sub-list for field type_name
}
func init() { file_dbobjects_proto_init() }
@ -2395,30 +2291,6 @@ func file_dbobjects_proto_init() {
}
}
file_dbobjects_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DbPruningPointUTXOSetBytes); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_dbobjects_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DbHeaderTips); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_dbobjects_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DbTips); i {
case 0:
return &v.state
@ -2430,7 +2302,7 @@ func file_dbobjects_proto_init() {
return nil
}
}
file_dbobjects_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
file_dbobjects_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DbVirtualDiffParents); i {
case 0:
return &v.state
@ -2442,7 +2314,7 @@ func file_dbobjects_proto_init() {
return nil
}
}
file_dbobjects_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
file_dbobjects_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DbBlockCount); i {
case 0:
return &v.state
@ -2454,7 +2326,7 @@ func file_dbobjects_proto_init() {
return nil
}
}
file_dbobjects_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
file_dbobjects_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DbBlockHeaderCount); i {
case 0:
return &v.state
@ -2473,7 +2345,7 @@ func file_dbobjects_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_dbobjects_proto_rawDesc,
NumEnums: 0,
NumMessages: 30,
NumMessages: 28,
NumExtensions: 0,
NumServices: 0,
},


@ -139,14 +139,6 @@ message DbUtxoDiff {
repeated DbUtxoCollectionItem toRemove = 2;
}
message DbPruningPointUTXOSetBytes {
bytes bytes = 1;
}
message DbHeaderTips {
repeated DbHash tips = 1;
}
message DbTips {
repeated DbHash tips = 1;
}


@ -1,17 +0,0 @@
package serialization
import (
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// HeaderTipsToDBHeaderTips converts a slice of hashes to DbHeaderTips
func HeaderTipsToDBHeaderTips(tips []*externalapi.DomainHash) *DbHeaderTips {
return &DbHeaderTips{
Tips: DomainHashesToDbHashes(tips),
}
}
// DBHeaderTipsToHeaderTips converts DbHeaderTips to a slice of hashes
func DBHeaderTipsToHeaderTips(dbHeaderTips *DbHeaderTips) ([]*externalapi.DomainHash, error) {
return DbHashesToDomainHashes(dbHeaderTips.Tips)
}


@ -196,20 +196,6 @@ func (ps *pruningStore) deserializePruningPoint(pruningPointBytes []byte) (*exte
return serialization.DbHashToDomainHash(dbHash)
}
func (ps *pruningStore) serializeUTXOSetBytes(pruningPointUTXOSetBytes []byte) ([]byte, error) {
return proto.Marshal(&serialization.DbPruningPointUTXOSetBytes{Bytes: pruningPointUTXOSetBytes})
}
func (ps *pruningStore) deserializeUTXOSetBytes(dbPruningPointUTXOSetBytes []byte) ([]byte, error) {
dbPruningPointUTXOSet := &serialization.DbPruningPointUTXOSetBytes{}
err := proto.Unmarshal(dbPruningPointUTXOSetBytes, dbPruningPointUTXOSet)
if err != nil {
return nil, err
}
return dbPruningPointUTXOSet.Bytes, nil
}
func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader) (bool, error) {
if ps.pruningPointStaging != nil {
return true, nil


@ -1,21 +0,0 @@
package hashes
import (
"math/big"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
)
// ToBig converts a externalapi.DomainHash into a big.Int that can be used to
// perform math comparisons.
func ToBig(hash *externalapi.DomainHash) *big.Int {
// A Hash is in little-endian, but the big package wants the bytes in
// big-endian, so reverse them.
buf := hash.ByteArray()
blen := len(buf)
for i := 0; i < blen/2; i++ {
buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
}
return new(big.Int).SetBytes(buf[:])
}
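For illustration, a hedged sketch of the byte-order behavior described above; the externalapi constructor name used here is an assumption, not taken from this diff:

package hashes

import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi"

// Hypothetical check: the first (lowest) byte of the little-endian hash
// becomes the least significant byte of the resulting big.Int, so a hash of
// 0x01 followed by zeros converts to the integer 1.
func exampleToBig() {
    var hashBytes [externalapi.DomainHashSize]byte
    hashBytes[0] = 0x01
    hash := externalapi.NewDomainHashFromByteArray(&hashBytes)
    one := ToBig(hash)
    _ = one // one.Int64() == 1
}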


@ -312,7 +312,7 @@ func TestPayToAddrScript(t *testing.T) {
// Taken from transaction:
// b0539a45de13b3e0403909b8bd1a555b8cbe45fd4e3f3fda76f3a5f52835c29d
p2shMain, _ := util.NewAddressScriptHashFromHash(hexToBytes("e8c300"+
p2shMain, err := util.NewAddressScriptHashFromHash(hexToBytes("e8c300"+
"c87986efa84c37c0519929019ef86eb5b4"), util.Bech32PrefixKaspa)
if err != nil {
t.Fatalf("Unable to create script hash address: %v", err)


@ -11,7 +11,7 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
const _ = grpc.SupportPackageIsVersion7
// P2PClient is the client API for P2P service.
//
@ -71,12 +71,19 @@ type P2PServer interface {
type UnimplementedP2PServer struct {
}
func (*UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error {
func (UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error {
return status.Errorf(codes.Unimplemented, "method MessageStream not implemented")
}
func (*UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {}
func (UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {}
func RegisterP2PServer(s *grpc.Server, srv P2PServer) {
// UnsafeP2PServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to P2PServer will
// result in compilation errors.
type UnsafeP2PServer interface {
mustEmbedUnimplementedP2PServer()
}
func RegisterP2PServer(s grpc.ServiceRegistrar, srv P2PServer) {
s.RegisterService(&_P2P_serviceDesc, srv)
}
@ -179,12 +186,19 @@ type RPCServer interface {
type UnimplementedRPCServer struct {
}
func (*UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error {
func (UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error {
return status.Errorf(codes.Unimplemented, "method MessageStream not implemented")
}
func (*UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {}
func (UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {}
func RegisterRPCServer(s *grpc.Server, srv RPCServer) {
// UnsafeRPCServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to RPCServer will
// result in compilation errors.
type UnsafeRPCServer interface {
mustEmbedUnimplementedRPCServer()
}
func RegisterRPCServer(s grpc.ServiceRegistrar, srv RPCServer) {
s.RegisterService(&_RPC_serviceDesc, srv)
}
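For context, a hedged sketch of how a concrete implementation satisfies the regenerated interface (the type name rpcServer is hypothetical): embedding the generated Unimplemented struct provides mustEmbedUnimplementedRPCServer and keeps the build forward compatible when methods are added to the service.

// Hypothetical sketch, assumed to live alongside the generated code.
type rpcServer struct {
    UnimplementedRPCServer
}

func (s *rpcServer) MessageStream(stream RPC_MessageStreamServer) error {
    // Handle the bidirectional message stream here.
    return nil
}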


@ -12,8 +12,6 @@ import (
"github.com/pkg/errors"
)
const ()
// SetLimits raises some process limits to values which allow kaspad and
// associated utilities to run.
func SetLimits(desiredLimits *DesiredLimits) error {


@ -40,7 +40,7 @@ func TestBech32(t *testing.T) {
if !test.valid {
// Invalid string decoding should result in error.
if err == nil {
t.Error("expected decoding to fail for "+
t.Errorf("expected decoding to fail for "+
"invalid string %v", test.str)
}
continue


@ -1,143 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package util
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
_ "crypto/sha512" // Needed for RegisterHash in init
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"github.com/pkg/errors"
"math/big"
"net"
"os"
"time"
)
// NewTLSCertPair returns a new PEM-encoded x.509 certificate pair
// based on a 521-bit ECDSA private key. The machine's local interface
// addresses and all variants of IPv4 and IPv6 localhost are included as
// valid IP addresses.
func NewTLSCertPair(organization string, validUntil time.Time, extraHosts []string) (cert, key []byte, err error) {
now := time.Now()
if validUntil.Before(now) {
return nil, nil, errors.New("validUntil would create an already-expired certificate")
}
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return nil, nil, err
}
// end of ASN.1 time
endOfTime := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
if validUntil.After(endOfTime) {
validUntil = endOfTime
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, errors.Errorf("failed to generate serial number: %s", err)
}
host, err := os.Hostname()
if err != nil {
return nil, nil, err
}
ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}
dnsNames := []string{host}
if host != "localhost" {
dnsNames = append(dnsNames, "localhost")
}
addIP := func(ipAddr net.IP) {
for _, ip := range ipAddresses {
if ip.Equal(ipAddr) {
return
}
}
ipAddresses = append(ipAddresses, ipAddr)
}
addHost := func(host string) {
for _, dnsName := range dnsNames {
if host == dnsName {
return
}
}
dnsNames = append(dnsNames, host)
}
addrs, err := interfaceAddrs()
if err != nil {
return nil, nil, err
}
for _, a := range addrs {
ipAddr, _, err := net.ParseCIDR(a.String())
if err == nil {
addIP(ipAddr)
}
}
for _, hostStr := range extraHosts {
host, _, err := net.SplitHostPort(hostStr)
if err != nil {
host = hostStr
}
if ip := net.ParseIP(host); ip != nil {
addIP(ip)
} else {
addHost(host)
}
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{organization},
CommonName: host,
},
NotBefore: now.Add(-time.Hour * 24),
NotAfter: validUntil,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature |
x509.KeyUsageCertSign,
IsCA: true, // so can sign self.
BasicConstraintsValid: true,
DNSNames: dnsNames,
IPAddresses: ipAddresses,
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template,
&template, &priv.PublicKey, priv)
if err != nil {
return nil, nil, errors.Errorf("failed to create certificate: %s", err)
}
certBuf := &bytes.Buffer{}
err = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
if err != nil {
return nil, nil, errors.Errorf("failed to encode certificate: %s", err)
}
keybytes, err := x509.MarshalECPrivateKey(priv)
if err != nil {
return nil, nil, errors.Errorf("failed to marshal private key: %s", err)
}
keyBuf := &bytes.Buffer{}
err = pem.Encode(keyBuf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keybytes})
if err != nil {
return nil, nil, errors.Errorf("failed to encode private key: %s", err)
}
return certBuf.Bytes(), keyBuf.Bytes(), nil
}
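A minimal usage sketch of the removed helper, assuming the same package; the organization and extra host are made-up values. It generates a self-signed pair valid for roughly ten years.

package util

import "time"

// Hypothetical sketch: the returned PEM blocks can be handed to tls.X509KeyPair.
func exampleGenerateCertPair() (certPEM, keyPEM []byte, err error) {
    validUntil := time.Now().Add(10 * 365 * 24 * time.Hour)
    return NewTLSCertPair("example org", validUntil, []string{"example.bogus"})
}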


@ -1,123 +0,0 @@
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package util_test
import (
"crypto/x509"
"encoding/pem"
"net"
"testing"
"time"
"github.com/kaspanet/kaspad/util"
//"github.com/davecgh/go-spew/spew"
)
// TestNewTLSCertPair ensures the NewTLSCertPair function works as expected.
func TestNewTLSCertPair(t *testing.T) {
// Certs don't support sub-second precision, so truncate it now to
// ensure the checks later don't fail due to nanosecond precision
// differences.
validUntil := time.Unix(time.Now().Add(10*365*24*time.Hour).Unix(), 0)
org := "test autogenerated cert"
extraHosts := []string{"testtlscert.bogus", "localhost", "127.0.0.1"}
cert, key, err := util.NewTLSCertPair(org, validUntil, extraHosts)
if err != nil {
t.Fatalf("failed with unexpected error: %v", err)
}
// Ensure the PEM-encoded cert that is returned can be decoded.
pemCert, _ := pem.Decode(cert)
if pemCert == nil {
t.Fatalf("pem.Decode was unable to decode the certificate")
}
// Ensure the PEM-encoded key that is returned can be decoded.
pemKey, _ := pem.Decode(key)
if pemKey == nil {
t.Fatalf("pem.Decode was unable to decode the key")
}
// Ensure the DER-encoded key bytes can be successfully parsed.
_, err = x509.ParseECPrivateKey(pemKey.Bytes)
if err != nil {
t.Fatalf("failed with unexpected error: %v", err)
}
// Ensure the DER-encoded cert bytes can be successfully parsed into an
// X.509 certificate.
x509Cert, err := x509.ParseCertificate(pemCert.Bytes)
if err != nil {
t.Fatalf("failed with unexpected error: %v", err)
}
// Ensure the specified organization is correct.
x509Orgs := x509Cert.Subject.Organization
if len(x509Orgs) == 0 || x509Orgs[0] != org {
x509Org := "<no organization>"
if len(x509Orgs) > 0 {
x509Org = x509Orgs[0]
}
t.Fatalf("generated cert organization field mismatch, got "+
"'%v', want '%v'", x509Org, org)
}
// Ensure the specified valid until value is correct.
if !x509Cert.NotAfter.Equal(validUntil) {
t.Fatalf("generated cert valid until field mismatch, got %v, "+
"want %v", x509Cert.NotAfter, validUntil)
}
// Ensure the specified extra hosts are present.
for _, host := range extraHosts {
if err := x509Cert.VerifyHostname(host); err != nil {
t.Fatalf("failed to verify extra host '%s'", host)
}
}
// Ensure that the Common Name is also the first SAN DNS name.
cn := x509Cert.Subject.CommonName
san0 := x509Cert.DNSNames[0]
if cn != san0 {
t.Errorf("common name %s does not match first SAN %s", cn, san0)
}
// Ensure there are no duplicate hosts or IPs.
hostCounts := make(map[string]int)
for _, host := range x509Cert.DNSNames {
hostCounts[host]++
}
ipCounts := make(map[string]int)
for _, ip := range x509Cert.IPAddresses {
ipCounts[string(ip)]++
}
for host, count := range hostCounts {
if count != 1 {
t.Errorf("host %s appears %d times in certificate", host, count)
}
}
for ipStr, count := range ipCounts {
if count != 1 {
t.Errorf("ip %s appears %d times in certificate", net.IP(ipStr), count)
}
}
// Ensure the cert can be used for the intended purposes.
if !x509Cert.IsCA {
t.Fatal("generated cert is not a certificate authority")
}
if x509Cert.KeyUsage&x509.KeyUsageKeyEncipherment == 0 {
t.Fatal("generated cert can't be used for key encipherment")
}
if x509Cert.KeyUsage&x509.KeyUsageDigitalSignature == 0 {
t.Fatal("generated cert can't be used for digital signatures")
}
if x509Cert.KeyUsage&x509.KeyUsageCertSign == 0 {
t.Fatal("generated cert can't be used for signing other certs")
}
if !x509Cert.BasicConstraintsValid {
t.Fatal("generated cert does not have valid basic constraints")
}
}


@ -1,13 +0,0 @@
package fs
import "os"
// FileExists reports whether the named file or directory exists.
func FileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}


@ -1,79 +0,0 @@
package locks
import (
"sync"
)
// PriorityMutex is a read-write mutex with an additional low
// priority lock. It's implemented with the following
// components:
// * Data mutex: The actual lock on the data structure. Its
// type is sync.RWMutex for its high priority read lock.
// * High priority waiting group: A waiting group that is being
// incremented every time a high priority lock (read or write)
// is acquired, and decremented every time a high priority lock is
// unlocked. Low priority locks can start being held only
// when the waiting group is empty.
// * Low priority mutex: This mutex ensures that when the
// waiting group is empty, only one low priority lock
// will be able to lock the data mutex.
// PriorityMutex implements a lock with three priorities:
// * High priority write lock - locks the mutex with the highest priority.
// * High priority read lock - locks the mutex with lower priority than
// the high priority write lock. Can be held concurrently with
// other read locks.
// * Low priority write lock - locks the mutex with lower priority than
// the read lock.
type PriorityMutex struct {
dataMutex sync.RWMutex
highPriorityWaiting *waitGroup
lowPriorityMutex sync.Mutex
}
// NewPriorityMutex returns a new priority mutex
func NewPriorityMutex() *PriorityMutex {
lock := PriorityMutex{
highPriorityWaiting: newWaitGroup(),
}
return &lock
}
// LowPriorityWriteLock acquires a low-priority write lock.
func (mtx *PriorityMutex) LowPriorityWriteLock() {
mtx.lowPriorityMutex.Lock()
mtx.highPriorityWaiting.wait()
mtx.dataMutex.Lock()
}
// LowPriorityWriteUnlock unlocks the low-priority write lock
func (mtx *PriorityMutex) LowPriorityWriteUnlock() {
mtx.dataMutex.Unlock()
mtx.lowPriorityMutex.Unlock()
}
// HighPriorityWriteLock acquires a high-priority write lock.
func (mtx *PriorityMutex) HighPriorityWriteLock() {
mtx.highPriorityWaiting.add(1)
mtx.dataMutex.Lock()
}
// HighPriorityWriteUnlock unlocks the high-priority write lock
func (mtx *PriorityMutex) HighPriorityWriteUnlock() {
mtx.dataMutex.Unlock()
mtx.highPriorityWaiting.done()
}
// HighPriorityReadLock acquires a high-priority read
// lock.
func (mtx *PriorityMutex) HighPriorityReadLock() {
mtx.highPriorityWaiting.add(1)
mtx.dataMutex.RLock()
}
// HighPriorityReadUnlock unlocks the high-priority read
// lock
func (mtx *PriorityMutex) HighPriorityReadUnlock() {
mtx.highPriorityWaiting.done()
mtx.dataMutex.RUnlock()
}
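A minimal usage sketch of the removed mutex, assuming it sits in the same locks package: a low-priority writer only proceeds once every pending high-priority lock has been released.

package locks

import "fmt"

// Hypothetical example, not part of the package.
func ExamplePriorityMutex() {
    mtx := NewPriorityMutex()
    done := make(chan struct{})

    mtx.HighPriorityWriteLock()
    go func() {
        mtx.LowPriorityWriteLock() // blocks while the high-priority lock is held
        defer mtx.LowPriorityWriteUnlock()
        fmt.Println("low-priority writer ran")
        close(done)
    }()
    mtx.HighPriorityWriteUnlock()

    <-done
    // Output: low-priority writer ran
}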


@ -1,80 +0,0 @@
package locks
import (
"reflect"
"sync"
"testing"
"time"
)
func TestPriorityMutex(t *testing.T) {
mtx := NewPriorityMutex()
sharedSlice := []int{}
lowPriorityLockAcquired := false
wg := sync.WaitGroup{}
wg.Add(3)
mtx.HighPriorityWriteLock()
go func() {
mtx.LowPriorityWriteLock()
defer mtx.LowPriorityWriteUnlock()
sharedSlice = append(sharedSlice, 2)
lowPriorityLockAcquired = true
wg.Done()
}()
go func() {
mtx.HighPriorityReadLock()
defer mtx.HighPriorityReadUnlock()
if lowPriorityLockAcquired {
t.Errorf("LowPriorityWriteLock unexpectedly released")
}
wg.Done()
}()
go func() {
mtx.HighPriorityWriteLock()
defer mtx.HighPriorityWriteUnlock()
sharedSlice = append(sharedSlice, 1)
if lowPriorityLockAcquired {
t.Errorf("LowPriorityWriteLock unexpectedly released")
}
wg.Done()
}()
time.Sleep(time.Second)
mtx.HighPriorityWriteUnlock()
wg.Wait()
expectedSlice := []int{1, 2}
if !reflect.DeepEqual(sharedSlice, expectedSlice) {
t.Errorf("Expected the shared slice to be %d but got %d", expectedSlice, sharedSlice)
}
}
func TestHighPriorityReadLock(t *testing.T) {
mtx := NewPriorityMutex()
wg := sync.WaitGroup{}
wg.Add(2)
mtx.LowPriorityWriteLock()
isReadLockHeld := false
ch := make(chan struct{})
go func() {
mtx.HighPriorityReadLock()
defer mtx.HighPriorityReadUnlock()
isReadLockHeld = true
ch <- struct{}{}
<-ch
isReadLockHeld = false
wg.Done()
}()
go func() {
mtx.HighPriorityReadLock()
defer mtx.HighPriorityReadUnlock()
<-ch
if !isReadLockHeld {
t.Errorf("expected another read lock to be held concurrently")
}
ch <- struct{}{}
wg.Done()
}()
time.Sleep(time.Second)
mtx.LowPriorityWriteUnlock()
wg.Wait()
}


@ -1,108 +0,0 @@
package locks
import (
"sync"
"sync/atomic"
)
// waitGroup is a type that implements the same API
// as sync.WaitGroup but allows concurrent calls to
// add() and wait().
//
// The wait group maintains a counter that can be
// increased by delta by using the waitGroup.add
// method, and decreased by 1 by using the
// waitGroup.done method.
// The general idea of the waitGroup.wait method
// is to block the current goroutine until the
// counter is set to 0. This is how it's implemented:
//
// 1) The waitGroup.wait method checks whether waitGroup.counter
// is 0. If that's the case, the function returns. Otherwise,
// it sets the flag waitGroup.isReleaseWaitWaiting to 1 so
// that there's a pending wait function, and waits for a signal
// from the channel waitGroup.releaseWait (waitGroup.isReleaseWaitWaiting
// is set to 1 wrapped with waitGroup.isReleaseWaitWaitingLock to
// synchronize with the reader from waitGroup.done).
//
// 2) When waitGroup.done is called, it checks if waitGroup.counter
// is 0.
//
// 3) If waitGroup.counter is 0, it also checks if there's any pending
// wait function by checking if wg.isReleaseWaitWaiting is 1, and if
// this is the case, it sends a signal to release the pending wait
// function, and then waits for a signal from waitGroup.releaseDone,
// and when the signal is received, the function returns.
// This step is wrapped with isReleaseWaitWaitingLock for two reasons:
// a) Prevent a situation where the waitGroup.wait goroutine yields just
// before it sets wg.isReleaseWaitWaiting to 1, and then
// waitGroup.done exits the function without sending the signal
// to waitGroup.wait.
// b) Prevent two waitGroup.done calls from concurrently sending a signal
// to the channel wg.releaseWait, which would make one of them hang forever.
//
// 4) After waitGroup.wait is released, it sets
// waitGroup.isReleaseWaitWaiting to 0, sends
// a signal to wg.releaseDone, and goes back to step 1.
//
// The waitGroup.wait is wrapped with waitGroup.mainWaitLock. It
// is used to enable multiple waits pending for the counter to be
// set to zero. This way, when one wait function returns, the other
// waits that are pending on waitGroup.mainWaitLock immediately
// return as well. Without that lock, each call to waitGroup.wait
// would wait for its own signal from waitGroup.releaseWait,
// which means that for n waits to be unblocked, the counter would
// have to be set to 0 n times.
type waitGroup struct {
counter, isReleaseWaitWaiting int64
mainWaitLock, isReleaseWaitWaitingLock sync.Mutex
releaseWait, releaseDone chan struct{}
}
func newWaitGroup() *waitGroup {
return &waitGroup{
releaseWait: make(chan struct{}),
releaseDone: make(chan struct{}),
}
}
func (wg *waitGroup) add(delta int64) {
atomic.AddInt64(&wg.counter, delta)
}
func (wg *waitGroup) done() {
counter := atomic.AddInt64(&wg.counter, -1)
if counter < 0 {
panic("negative values for wg.counter are not allowed. This was likely caused by calling done() before add()")
}
// To avoid a situation where a struct is
// being sent to wg.releaseWait while there
// are no listeners to the channel (which will
// cause the goroutine to hang for eternity),
// we check wg.isReleaseWaitWaiting to see
// if there is a listener to wg.releaseWait.
if atomic.LoadInt64(&wg.counter) == 0 {
wg.isReleaseWaitWaitingLock.Lock()
defer wg.isReleaseWaitWaitingLock.Unlock()
if atomic.LoadInt64(&wg.isReleaseWaitWaiting) == 1 {
wg.releaseWait <- struct{}{}
<-wg.releaseDone
}
}
}
func (wg *waitGroup) wait() {
wg.mainWaitLock.Lock()
defer wg.mainWaitLock.Unlock()
wg.isReleaseWaitWaitingLock.Lock()
defer wg.isReleaseWaitWaitingLock.Unlock()
for atomic.LoadInt64(&wg.counter) != 0 {
atomic.StoreInt64(&wg.isReleaseWaitWaiting, 1)
wg.isReleaseWaitWaitingLock.Unlock()
<-wg.releaseWait
atomic.StoreInt64(&wg.isReleaseWaitWaiting, 0)
wg.releaseDone <- struct{}{}
wg.isReleaseWaitWaitingLock.Lock()
}
}
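A short sketch, assuming the same package, of the property the comment above emphasizes: add may be called concurrently with wait, which the standard sync.WaitGroup does not allow.

package locks

// Hypothetical example, not part of the package.
func exampleConcurrentAddAndWait() {
    wg := newWaitGroup()
    wg.add(1)

    finished := make(chan struct{})
    go func() {
        wg.wait() // returns once the counter drops back to 0
        close(finished)
    }()

    wg.add(1) // allowed even while wait may already be blocked
    wg.done()
    wg.done()
    <-finished
}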


@ -1,213 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package locks
import (
"sync/atomic"
"testing"
)
// All of the tests, except TestAddAfterWait and
// TestWaitAfterAddDoneCounterHasReset, are copied
// from the native sync/waitgroup_test.go (with some
// minor changes), to check that the new waitGroup
// behaves the same, except enabling the use of add()
// concurrently with wait()
func testWaitGroup(t *testing.T, wg1 *waitGroup, wg2 *waitGroup) {
n := int64(16)
wg1.add(n)
wg2.add(n)
exited := make(chan struct{}, n)
for i := int64(0); i != n; i++ {
go func(i int64) {
wg1.done()
wg2.wait()
exited <- struct{}{}
}(i)
}
wg1.wait()
for i := int64(0); i != n; i++ {
select {
case <-exited:
t.Fatal("waitGroup released group too soon")
default:
}
wg2.done()
}
for i := int64(0); i != n; i++ {
<-exited // Will block if barrier fails to unlock someone.
}
}
func TestWaitGroup(t *testing.T) {
wg1 := newWaitGroup()
wg2 := newWaitGroup()
// Run the same test a few times to ensure barrier is in a proper state.
for i := 0; i != 1000; i++ {
testWaitGroup(t, wg1, wg2)
}
}
func TestWaitGroupMisuse(t *testing.T) {
defer func() {
err := recover()
if err != "negative values for wg.counter are not allowed. This was likely caused by calling done() before add()" {
t.Fatalf("Unexpected panic: %#v", err)
}
}()
wg := newWaitGroup()
wg.add(1)
wg.done()
wg.done()
t.Fatal("Should panic, because wg.counter should be negative (-1), which is not allowed")
}
func TestAddAfterWait(t *testing.T) {
wg := newWaitGroup()
wg.add(1)
syncChan := make(chan struct{})
go func() {
syncChan <- struct{}{}
wg.wait()
syncChan <- struct{}{}
}()
<-syncChan
wg.add(1)
wg.done()
wg.done()
<-syncChan
}
func TestWaitGroupRace(t *testing.T) {
// Run this test for about 1ms.
for i := 0; i < 1000; i++ {
wg := newWaitGroup()
n := new(int32)
// spawn goroutine 1
wg.add(1)
go func() {
atomic.AddInt32(n, 1)
wg.done()
}()
// spawn goroutine 2
wg.add(1)
go func() {
atomic.AddInt32(n, 1)
wg.done()
}()
// Wait for goroutine 1 and 2
wg.wait()
if atomic.LoadInt32(n) != 2 {
t.Fatal("Spurious wakeup from Wait")
}
}
}
func TestWaitGroupAlign(t *testing.T) {
type X struct {
x byte
wg *waitGroup
}
x := X{wg: newWaitGroup()}
x.wg.add(1)
go func(x *X) {
x.wg.done()
}(&x)
x.wg.wait()
}
func TestWaitAfterAddDoneCounterHasReset(t *testing.T) {
wg := newWaitGroup()
wg.add(1)
wg.done()
wg.add(1)
wg.done()
wg.wait()
}
func BenchmarkWaitGroupUncontended(b *testing.B) {
type PaddedWaitGroup struct {
*waitGroup
pad [128]uint8
}
b.RunParallel(func(pb *testing.PB) {
wg := PaddedWaitGroup{
waitGroup: newWaitGroup(),
}
for pb.Next() {
wg.add(1)
wg.done()
wg.wait()
}
})
}
func benchmarkWaitGroupAdddone(b *testing.B, localWork int) {
wg := newWaitGroup()
b.RunParallel(func(pb *testing.PB) {
foo := 0
for pb.Next() {
wg.add(1)
for i := 0; i < localWork; i++ {
foo *= 2
foo /= 2
}
wg.done()
}
_ = foo
})
}
func BenchmarkWaitGroupAdddone(b *testing.B) {
benchmarkWaitGroupAdddone(b, 0)
}
func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
benchmarkWaitGroupAdddone(b, 100)
}
func benchmarkWaitGroupwait(b *testing.B, localWork int) {
wg := newWaitGroup()
b.RunParallel(func(pb *testing.PB) {
foo := 0
for pb.Next() {
wg.wait()
for i := 0; i < localWork; i++ {
foo *= 2
foo /= 2
}
}
_ = foo
})
}
func BenchmarkWaitGroupwait(b *testing.B) {
benchmarkWaitGroupwait(b, 0)
}
func BenchmarkWaitGroupWaitWork(b *testing.B) {
benchmarkWaitGroupwait(b, 100)
}
func BenchmarkWaitGroupActuallywait(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
wg := newWaitGroup()
wg.add(1)
go func() {
wg.done()
}()
wg.wait()
}
})
}


@ -1,18 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// +build !appengine
package util
import (
"net"
)
// interfaceAddrs returns a list of the system's network interface addresses.
// It is wrapped here so that we can substitute it for other functions when
// building for systems that do not allow access to net.InterfaceAddrs().
func interfaceAddrs() ([]net.Addr, error) {
return net.InterfaceAddrs()
}


@ -1,19 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// +build appengine
package util
import (
"net"
)
// interfaceAddrs returns a list of the system's network interface addresses.
// It is wrapped here so that we can substitute it for a no-op function that
// returns an empty slice of net.Addr when building for systems that do not
// allow access to net.InterfaceAddrs().
func interfaceAddrs() ([]net.Addr, error) {
return []net.Addr{}, nil
}


@ -1,73 +0,0 @@
package pointers
// Bool is a helper routine that allocates a new bool value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Bool(v bool) *bool {
p := new(bool)
*p = v
return p
}
// Int is a helper routine that allocates a new int value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Int(v int) *int {
p := new(int)
*p = v
return p
}
// Uint is a helper routine that allocates a new uint value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Uint(v uint) *uint {
p := new(uint)
*p = v
return p
}
// Int32 is a helper routine that allocates a new int32 value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Int32(v int32) *int32 {
p := new(int32)
*p = v
return p
}
// Uint32 is a helper routine that allocates a new uint32 value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Uint32(v uint32) *uint32 {
p := new(uint32)
*p = v
return p
}
// Int64 is a helper routine that allocates a new int64 value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Int64(v int64) *int64 {
p := new(int64)
*p = v
return p
}
// Uint64 is a helper routine that allocates a new uint64 value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Uint64(v uint64) *uint64 {
p := new(uint64)
*p = v
return p
}
// Float64 is a helper routine that allocates a new float64 value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func Float64(v float64) *float64 {
p := new(float64)
*p = v
return p
}
// String is a helper routine that allocates a new string value to store v and
// returns a pointer to it. This is useful when assigning optional parameters.
func String(v string) *string {
p := new(string)
*p = v
return p
}
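A hedged sketch of the pattern these helpers enable (the struct and field names are made up): filling optional, pointer-typed fields without declaring throwaway variables, since Go does not allow taking the address of a literal directly.

package pointers

// Hypothetical example, not part of the package.
type exampleOptions struct {
    Verbose *bool
    Limit   *int64
    Label   *string
}

func exampleFill() exampleOptions {
    return exampleOptions{
        Verbose: Bool(true),    // instead of: v := true; Verbose: &v
        Limit:   Int64(100),    // instead of: l := int64(100); Limit: &l
        Label:   String("abc"),
    }
}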


@ -1,113 +0,0 @@
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package pointers
import (
"reflect"
"testing"
)
// TestCopyToPointer tests the various helper functions which create pointers to
// primitive types.
func TestCopyToPointer(t *testing.T) {
t.Parallel()
tests := []struct {
name string
f func() interface{}
expected interface{}
}{
{
name: "bool",
f: func() interface{} {
return Bool(true)
},
expected: func() interface{} {
val := true
return &val
}(),
},
{
name: "int",
f: func() interface{} {
return Int(5)
},
expected: func() interface{} {
val := int(5)
return &val
}(),
},
{
name: "uint",
f: func() interface{} {
return Uint(5)
},
expected: func() interface{} {
val := uint(5)
return &val
}(),
},
{
name: "int32",
f: func() interface{} {
return Int32(5)
},
expected: func() interface{} {
val := int32(5)
return &val
}(),
},
{
name: "uint32",
f: func() interface{} {
return Uint32(5)
},
expected: func() interface{} {
val := uint32(5)
return &val
}(),
},
{
name: "int64",
f: func() interface{} {
return Int64(5)
},
expected: func() interface{} {
val := int64(5)
return &val
}(),
},
{
name: "uint64",
f: func() interface{} {
return Uint64(5)
},
expected: func() interface{} {
val := uint64(5)
return &val
}(),
},
{
name: "string",
f: func() interface{} {
return String("abc")
},
expected: func() interface{} {
val := "abc"
return &val
}(),
},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
result := test.f()
if !reflect.DeepEqual(result, test.expected) {
t.Errorf("Test #%d (%s) unexpected value - got %v, "+
"want %v", i, test.name, result, test.expected)
continue
}
}
}


@ -2,23 +2,15 @@ package random
import (
"crypto/rand"
"io"
"github.com/kaspanet/kaspad/util/binaryserializer"
"encoding/binary"
)
// randomUint64 returns a cryptographically random uint64 value. This
// unexported version takes a reader primarily to ensure the error paths
// can be properly tested by passing a fake reader in the tests.
func randomUint64(r io.Reader) (uint64, error) {
rv, err := binaryserializer.Uint64(r)
if err != nil {
return 0, err
}
return rv, nil
}
// Uint64 returns a cryptographically random uint64 value.
func Uint64() (uint64, error) {
return randomUint64(rand.Reader)
var buf [8]byte
_, err := rand.Read(buf[:])
if err != nil {
return 0, err
}
return binary.LittleEndian.Uint64(buf[:]), nil
}


@ -1,6 +1,7 @@
package random
import (
"crypto/rand"
"fmt"
"github.com/pkg/errors"
"io"
@ -61,8 +62,9 @@ func TestRandomUint64(t *testing.T) {
// and checks the results accordingly.
func TestRandomUint64Errors(t *testing.T) {
// Test short reads.
fr := &fakeRandReader{n: 2, err: io.EOF}
nonce, err := randomUint64(fr)
reader := rand.Reader
rand.Reader = &fakeRandReader{n: 2, err: io.EOF}
nonce, err := Uint64()
if !errors.Is(err, io.ErrUnexpectedEOF) {
t.Errorf("Error not expected value of %v [%v]",
io.ErrUnexpectedEOF, err)
@ -70,4 +72,5 @@ func TestRandomUint64Errors(t *testing.T) {
if nonce != 0 {
t.Errorf("Nonce is not 0 [%v]", nonce)
}
rand.Reader = reader
}


@ -1,69 +0,0 @@
package util
import (
"strings"
"unicode"
)
// ToCamelCase converts a camelCase-ish string into a typical JSON camelCase string.
// Example conversion: MyJSONVariable -> myJsonVariable
func ToCamelCase(str string) string {
if len(str) == 0 {
return ""
}
// Split the string into words
words := make([]string, 0)
wordStartIndex := 0
wordEndIndex := -1
var previousCharacter rune
for i, character := range str {
if i > 0 {
if unicode.IsLower(previousCharacter) && unicode.IsUpper(character) {
// previousCharacter is definitely the end of a word
wordEndIndex = i - 1
word := str[wordStartIndex:i]
words = append(words, word)
} else if unicode.IsUpper(previousCharacter) && unicode.IsLower(character) {
// previousCharacter is definitely the start of a word
wordStartIndex = i - 1
if wordStartIndex-wordEndIndex > 1 {
// This handles consecutive uppercase words, such as acronyms.
// Example: getBlockDAGInfo
// ^^^
word := str[wordEndIndex+1 : wordStartIndex]
words = append(words, word)
}
}
}
previousCharacter = character
}
if unicode.IsUpper(previousCharacter) {
// This handles consecutive uppercase words, such as acronyms, at the end of the string
// Example: TxID
// ^^
for i := len(str) - 1; i >= 0; i-- {
if unicode.IsLower(rune(str[i])) {
break
}
wordStartIndex = i
}
}
lastWord := str[wordStartIndex:]
words = append(words, lastWord)
// Build a PascalCase string out of the words
var camelCaseBuilder strings.Builder
for _, word := range words {
lowercaseWord := strings.ToLower(word)
capitalizedWord := strings.ToUpper(string(lowercaseWord[0])) + lowercaseWord[1:]
camelCaseBuilder.WriteString(capitalizedWord)
}
camelCaseString := camelCaseBuilder.String()
// Un-capitalize the first character to convert PascalCase into camelCase
return strings.ToLower(string(camelCaseString[0])) + camelCaseString[1:]
}


@ -1,61 +0,0 @@
package util
import "testing"
// TestToCamelCase tests whether ToCamelCase correctly converts camelCase-ish strings to camelCase.
func TestToCamelCase(t *testing.T) {
tests := []struct {
name string
input string
expectedResult string
}{
{
name: "single word that's already in camelCase",
input: "abc",
expectedResult: "abc",
},
{
name: "single word in PascalCase",
input: "Abc",
expectedResult: "abc",
},
{
name: "single word in all caps",
input: "ABC",
expectedResult: "abc",
},
{
name: "multiple words that are already in camelCase",
input: "aaaBbbCcc",
expectedResult: "aaaBbbCcc",
},
{
name: "multiple words in PascalCase",
input: "AaaBbbCcc",
expectedResult: "aaaBbbCcc",
},
{
name: "acronym in start position",
input: "AAABbbCcc",
expectedResult: "aaaBbbCcc",
},
{
name: "acronym in middle position",
input: "aaaBBBCcc",
expectedResult: "aaaBbbCcc",
},
{
name: "acronym in end position",
input: "aaaBbbCCC",
expectedResult: "aaaBbbCcc",
},
}
for _, test := range tests {
result := ToCamelCase(test.input)
if result != test.expectedResult {
t.Errorf("ToCamelCase for test \"%s\" returned an unexpected result. "+
"Expected: \"%s\", got: \"%s\"", test.name, test.expectedResult, result)
}
}
}


@ -1,23 +0,0 @@
package testtools
import (
"time"
)
// WaitTillAllCompleteOrTimeout waits until all the provided channels have been written to,
// or until a timeout period has passed.
// Returns true iff all channels returned in the allotted time.
func WaitTillAllCompleteOrTimeout(timeoutDuration time.Duration, chans ...chan struct{}) (ok bool) {
timeout := time.After(timeoutDuration)
for _, c := range chans {
select {
case <-c:
continue
case <-timeout:
return false
}
}
return true
}
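A minimal usage sketch of the removed helper, assuming the same package (the worker function is a placeholder): wait for two goroutines to signal completion, giving up after one second.

package testtools

import "time"

// Hypothetical example, not part of the package.
func exampleWaitForWorkers(doWork func()) bool {
    first := make(chan struct{})
    second := make(chan struct{})

    go func() { doWork(); close(first) }()
    go func() { doWork(); close(second) }()

    // false means at least one worker did not finish within the timeout.
    return WaitTillAllCompleteOrTimeout(time.Second, first, second)
}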


@ -1,18 +0,0 @@
package util
import "sort"
// SearchSlice uses binary search to find and return the smallest index i
// in [0, n) at which f(i) is true, assuming that on the range [0, n),
// f(i) == true implies f(i+1) == true. That is, SearchSlice requires that
// f is false for some (possibly empty) prefix of the input range [0, n)
// and then true for the (possibly empty) remainder; SearchSlice returns
// the first true index.
// SearchSlice calls f(i) only for i in the range [0, n).
func SearchSlice(sliceLength int, searchFunc func(int) bool) (foundIndex int, ok bool) {
result := sort.Search(sliceLength, searchFunc)
if result == sliceLength {
return -1, false
}
return result, true
}
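A minimal usage sketch, assuming the same package: find the first element of a sorted slice that is greater than or equal to a target value.

package util

// Hypothetical example, not part of the package.
func exampleSearchSorted() (int, bool) {
    values := []int{1, 3, 5, 7, 9}
    target := 6

    index, ok := SearchSlice(len(values), func(i int) bool {
        return values[i] >= target
    })
    if !ok {
        return 0, false // every element is smaller than target
    }
    return values[index], true // returns 7, true
}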