diff --git a/addressmanager/addressmanager.go b/addressmanager/addressmanager.go new file mode 100644 index 000000000..5d6d81469 --- /dev/null +++ b/addressmanager/addressmanager.go @@ -0,0 +1,1374 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package addressmanager + +import ( + "bytes" + crand "crypto/rand" // for seeding + "encoding/binary" + "encoding/gob" + "github.com/kaspanet/kaspad/config" + "github.com/kaspanet/kaspad/dbaccess" + "github.com/kaspanet/kaspad/util/mstime" + "github.com/pkg/errors" + "io" + "math/rand" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/kaspanet/kaspad/util/subnetworkid" + + "github.com/kaspanet/kaspad/util/daghash" + "github.com/kaspanet/kaspad/wire" +) + +// AddressKey represents a "string" key in the form of ip:port for IPv4 addresses +// or [ip]:port for IPv6 addresses for use as keys in maps. +type AddressKey string +type newAddressBucketArray [NewBucketCount]map[AddressKey]*KnownAddress +type triedAddressBucketArray [TriedBucketCount][]*KnownAddress + +// AddressManager provides a concurrency safe address manager for caching potential +// peers on the Kaspa network. +type AddressManager struct { + cfg *config.Config + databaseContext *dbaccess.DatabaseContext + + mutex sync.Mutex + lookupFunc func(string) ([]net.IP, error) + random *rand.Rand + key [32]byte + addressIndex map[AddressKey]*KnownAddress // address keys to known addresses for all addresses. + started int32 + shutdown int32 + wg sync.WaitGroup + quit chan struct{} + localAddressesLock sync.Mutex + localAddresses map[AddressKey]*localAddress + localSubnetworkID *subnetworkid.SubnetworkID + + fullNodeNewAddressBucketArray newAddressBucketArray + fullNodeNewAddressCount int + fullNodeTriedAddressBucketArray triedAddressBucketArray + fullNodeTriedAddressCount int + subnetworkNewAddressBucketArrays map[subnetworkid.SubnetworkID]*newAddressBucketArray + subnetworkNewAddressCounts map[subnetworkid.SubnetworkID]int + subnetworkTriedAddresBucketArrays map[subnetworkid.SubnetworkID]*triedAddressBucketArray + subnetworkTriedAddressCounts map[subnetworkid.SubnetworkID]int +} + +type serializedKnownAddress struct { + Address AddressKey + SourceAddress AddressKey + SubnetworkID string + Attempts int + TimeStamp int64 + LastAttempt int64 + LastSuccess int64 + // no refcount or tried, that is available from context. +} + +type serializedNewAddressBucketArray [NewBucketCount][]AddressKey +type serializedTriedAddressBucketArray [TriedBucketCount][]AddressKey + +// PeersStateForSerialization is the data model that is used to +// serialize the peers state to any encoding. +type PeersStateForSerialization struct { + Version int + Key [32]byte + Addresses []*serializedKnownAddress + + SubnetworkNewAddressBucketArrays map[string]*serializedNewAddressBucketArray // string is Subnetwork ID + FullNodeNewAddressBucketArray serializedNewAddressBucketArray + SubnetworkTriedAddressBucketArrays map[string]*serializedTriedAddressBucketArray // string is Subnetwork ID + FullNodeTriedAddressBucketArray serializedTriedAddressBucketArray +} + +type localAddress struct { + netAddress *wire.NetAddress + score AddressPriority +} + +// AddressPriority type is used to describe the hierarchy of local address +// discovery methods. 
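Aside: the `ip:port` / `[ip]:port` key format described above is exactly what `net.JoinHostPort` produces, which is how `NetAddressKey` (defined further down in this file) builds keys. A standalone illustration using only the standard library:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// addressKey mimics NetAddressKey: ip:port for IPv4, [ip]:port for IPv6.
// JoinHostPort adds the brackets whenever the host contains a colon.
func addressKey(ip net.IP, port uint16) string {
	return net.JoinHostPort(ip.String(), strconv.FormatUint(uint64(port), 10))
}

func main() {
	fmt.Println(addressKey(net.ParseIP("203.0.113.5"), 16111)) // 203.0.113.5:16111
	fmt.Println(addressKey(net.ParseIP("2001:db8::1"), 16111)) // [2001:db8::1]:16111
}
```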
+type AddressPriority int
+
+const (
+	// InterfacePrio signifies the address is on a local interface
+	InterfacePrio AddressPriority = iota
+
+	// BoundPrio signifies the address has been explicitly bound to.
+	BoundPrio
+
+	// UpnpPrio signifies the address was obtained from UPnP.
+	UpnpPrio
+
+	// HTTPPrio signifies the address was obtained from an external HTTP service.
+	HTTPPrio
+
+	// ManualPrio signifies the address was provided by --externalip.
+	ManualPrio
+)
+
+const (
+	// needAddressThreshold is the number of addresses under which the
+	// address manager will claim to need more addresses.
+	needAddressThreshold = 1000
+
+	// dumpAddressInterval is the interval used to dump the address
+	// cache to disk for future use.
+	dumpAddressInterval = time.Minute * 10
+
+	// triedBucketSize is the maximum number of addresses in each
+	// tried address bucket.
+	triedBucketSize = 256
+
+	// TriedBucketCount is the number of buckets we split tried
+	// addresses over.
+	TriedBucketCount = 64
+
+	// newBucketSize is the maximum number of addresses in each new address
+	// bucket.
+	newBucketSize = 64
+
+	// NewBucketCount is the number of buckets that we spread new addresses
+	// over.
+	NewBucketCount = 1024
+
+	// triedBucketsPerGroup is the number of tried buckets over which an
+	// address group will be spread.
+	triedBucketsPerGroup = 8
+
+	// newBucketsPerGroup is the number of new buckets over which a
+	// source address group will be spread.
+	newBucketsPerGroup = 64
+
+	// newBucketsPerAddress is the number of buckets a frequently seen new
+	// address may end up in.
+	newBucketsPerAddress = 8
+
+	// numMissingDays is the number of days without seeing an address
+	// announced before we assume it has vanished.
+	numMissingDays = 30
+
+	// numRetries is the number of tries without a single success before
+	// we assume an address is bad.
+	numRetries = 3
+
+	// maxFailures is the maximum number of failures we will accept without
+	// a success before considering an address bad.
+	maxFailures = 10
+
+	// minBadDays is the number of days since the last success before we
+	// will consider evicting an address.
+	minBadDays = 7
+
+	// getAddrMin is the minimum number of addresses that we will send in
+	// response to a getAddresses. If we have fewer than this amount, we send everything.
+	getAddrMin = 50
+
+	// GetAddressesMax is the maximum number of addresses that we will send in
+	// response to a getAddresses (in practice the most addresses we will return
+	// from a call to AddressCache()).
+	GetAddressesMax = 2500
+
+	// getAddrPercent is the percentage of total addresses known that we
+	// will share with a call to AddressCache.
+	getAddrPercent = 23
+
+	// serializationVersion is the current version of the on-disk format.
+	serializationVersion = 1
+)
+
+// New returns a new Kaspa address manager.
+func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext) *AddressManager {
+	addressManager := AddressManager{
+		cfg:               cfg,
+		databaseContext:   databaseContext,
+		lookupFunc:        cfg.Lookup,
+		random:            rand.New(rand.NewSource(time.Now().UnixNano())),
+		quit:              make(chan struct{}),
+		localAddresses:    make(map[AddressKey]*localAddress),
+		localSubnetworkID: cfg.SubnetworkID,
+	}
+	addressManager.reset()
+	return &addressManager
+}
+
+// updateAddress is a helper function to either update an address already known
+// to the address manager, or to add the address if not already known.
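A minimal lifecycle sketch for the constructor above together with Start/Stop (defined later in this file). It assumes the caller already holds an open *dbaccess.DatabaseContext; how that is opened is outside this diff:

```go
package main

import (
	"fmt"

	"github.com/kaspanet/kaspad/addressmanager"
	"github.com/kaspanet/kaspad/config"
	"github.com/kaspanet/kaspad/dbaccess"
)

// runAddressManager shows the New/Start/Stop lifecycle. The database
// context is assumed to be already open; that setup is outside this diff.
func runAddressManager(cfg *config.Config, databaseContext *dbaccess.DatabaseContext) error {
	addressManager := addressmanager.New(cfg, databaseContext)
	if err := addressManager.Start(); err != nil { // loads saved peers, starts the dump ticker
		return err
	}
	defer addressManager.Stop() // a final savePeers runs on shutdown

	// Feed it one address and ask for a dialing candidate back.
	if err := addressManager.AddAddressByIP("203.0.113.5:16111", nil); err != nil {
		return err
	}
	if knownAddress := addressManager.GetAddress(); knownAddress != nil {
		fmt.Println("candidate:", addressmanager.NetAddressKey(knownAddress.NetAddress()))
	}
	return nil
}
```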
+func (am *AddressManager) updateAddress(netAddress, sourceAddress *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
+	// Filter out non-routable addresses. Note that non-routable
+	// also includes invalid and local addresses.
+	if !am.IsRoutable(netAddress) {
+		return
+	}
+
+	addressKey := NetAddressKey(netAddress)
+	knownAddress := am.knownAddress(netAddress)
+	if knownAddress != nil {
+		// TODO: only update addresses periodically.
+		// Update the last seen time and services.
+		// Note that to prevent causing excess garbage on getaddr
+		// messages, the netAddresses in the address manager are *immutable*:
+		// if we need to change one, we replace the pointer with a new copy
+		// so that we don't have to copy every netAddress for getaddr.
+		if netAddress.Timestamp.After(knownAddress.netAddress.Timestamp) ||
+			(knownAddress.netAddress.Services&netAddress.Services) !=
+				netAddress.Services {
+
+			netAddressCopy := *knownAddress.netAddress
+			netAddressCopy.Timestamp = netAddress.Timestamp
+			netAddressCopy.AddService(netAddress.Services)
+			knownAddress.netAddress = &netAddressCopy
+		}
+
+		// If already in tried, we have nothing to do here.
+		if knownAddress.tried {
+			return
+		}
+
+		// Already at our max?
+		if knownAddress.referenceCount == newBucketsPerAddress {
+			return
+		}
+
+		// The more entries we have, the less likely we are to add more:
+		// the likelihood of adding another reference is 1/(2N), where N is
+		// the current reference count.
+		factor := int32(2 * knownAddress.referenceCount)
+		if am.random.Int31n(factor) != 0 {
+			return
+		}
+	} else {
+		// Make a copy of the net address to avoid races since it is
+		// updated elsewhere in the addressManager code and would otherwise
+		// change the actual netAddress on the peer.
+		netAddressCopy := *netAddress
+		knownAddress = &KnownAddress{netAddress: &netAddressCopy, sourceAddress: sourceAddress, subnetworkID: subnetworkID}
+		am.addressIndex[addressKey] = knownAddress
+		am.incrementNewAddressCount(subnetworkID)
+	}
+
+	// Already exists?
+	newAddressBucketArray := am.newAddressBucketArray(knownAddress.subnetworkID)
+	newAddressBucketIndex := am.newAddressBucketIndex(netAddress, sourceAddress)
+	if newAddressBucketArray != nil {
+		if _, ok := newAddressBucketArray[newAddressBucketIndex][addressKey]; ok {
+			return
+		}
+	}
+
+	// Enforce max addresses.
+	if newAddressBucketArray != nil && len(newAddressBucketArray[newAddressBucketIndex]) > newBucketSize {
+		log.Tracef("new bucket is full, expiring old")
+		am.expireNew(knownAddress.subnetworkID, newAddressBucketIndex)
+	}
+
+	// Add to new bucket.
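Before the add happens: the `Int31n(factor)` check above means an address already referenced in N new buckets gains another reference with probability 1/(2N), capped at newBucketsPerAddress. The dampening rule in isolation:

```go
package main

import (
	"fmt"
	"math/rand"
)

const newBucketsPerAddress = 8 // same cap as the address manager

// shouldAddReference reproduces the dampening rule from updateAddress:
// an address with referenceCount N gets another bucket slot with
// probability 1/(2N).
func shouldAddReference(random *rand.Rand, referenceCount int) bool {
	if referenceCount == 0 {
		return true // first sighting is always stored
	}
	if referenceCount == newBucketsPerAddress {
		return false // already at the maximum number of buckets
	}
	return random.Int31n(int32(2*referenceCount)) == 0
}

func main() {
	random := rand.New(rand.NewSource(1))
	for n := 1; n <= 4; n++ {
		added := 0
		for trial := 0; trial < 100000; trial++ {
			if shouldAddReference(random, n) {
				added++
			}
		}
		fmt.Printf("referenceCount=%d: added %.1f%% of the time (expect %.1f%%)\n",
			n, float64(added)/1000, 100.0/float64(2*n))
	}
}
```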
+ knownAddress.referenceCount++ + am.updateAddrNew(newAddressBucketIndex, addressKey, knownAddress) + + totalAddressCount := am.newAddressCount(knownAddress.subnetworkID) + am.triedAddressCount(knownAddress.subnetworkID) + log.Tracef("Added new address %s for a total of %d addresses", addressKey, totalAddressCount) + +} + +func (am *AddressManager) updateAddrNew(bucket int, addressKey AddressKey, knownAddress *KnownAddress) { + if knownAddress.subnetworkID == nil { + am.fullNodeNewAddressBucketArray[bucket][addressKey] = knownAddress + return + } + + if _, ok := am.subnetworkNewAddressBucketArrays[*knownAddress.subnetworkID]; !ok { + am.subnetworkNewAddressBucketArrays[*knownAddress.subnetworkID] = &newAddressBucketArray{} + for i := range am.subnetworkNewAddressBucketArrays[*knownAddress.subnetworkID] { + am.subnetworkNewAddressBucketArrays[*knownAddress.subnetworkID][i] = make(map[AddressKey]*KnownAddress) + } + } + am.subnetworkNewAddressBucketArrays[*knownAddress.subnetworkID][bucket][addressKey] = knownAddress +} + +func (am *AddressManager) updateAddrTried(bucketIndex int, knownAddress *KnownAddress) { + if knownAddress.subnetworkID == nil { + am.fullNodeTriedAddressBucketArray[bucketIndex] = append(am.fullNodeTriedAddressBucketArray[bucketIndex], knownAddress) + return + } + + if _, ok := am.subnetworkTriedAddresBucketArrays[*knownAddress.subnetworkID]; !ok { + am.subnetworkTriedAddresBucketArrays[*knownAddress.subnetworkID] = &triedAddressBucketArray{} + for i := range am.subnetworkTriedAddresBucketArrays[*knownAddress.subnetworkID] { + am.subnetworkTriedAddresBucketArrays[*knownAddress.subnetworkID][i] = nil + } + } + am.subnetworkTriedAddresBucketArrays[*knownAddress.subnetworkID][bucketIndex] = append(am.subnetworkTriedAddresBucketArrays[*knownAddress.subnetworkID][bucketIndex], knownAddress) +} + +// expireNew makes space in the new buckets by expiring the really bad entries. +// If no bad entries are available we look at a few and remove the oldest. +func (am *AddressManager) expireNew(subnetworkID *subnetworkid.SubnetworkID, bucketIndex int) { + // First see if there are any entries that are so bad we can just throw + // them away. otherwise we throw away the oldest entry in the cache. + // We keep track of oldest in the initial traversal and use that + // information instead. + var oldest *KnownAddress + newAddressBucketArray := am.newAddressBucketArray(subnetworkID) + for addressKey, knownAddress := range newAddressBucketArray[bucketIndex] { + if knownAddress.isBad() { + log.Tracef("expiring bad address %s", addressKey) + delete(newAddressBucketArray[bucketIndex], addressKey) + knownAddress.referenceCount-- + if knownAddress.referenceCount == 0 { + am.decrementNewAddressCount(subnetworkID) + delete(am.addressIndex, addressKey) + } + continue + } + if oldest == nil { + oldest = knownAddress + } else if !knownAddress.netAddress.Timestamp.After(oldest.netAddress.Timestamp) { + oldest = knownAddress + } + } + + if oldest != nil { + addressKey := NetAddressKey(oldest.netAddress) + log.Tracef("expiring oldest address %s", addressKey) + + delete(newAddressBucketArray[bucketIndex], addressKey) + oldest.referenceCount-- + if oldest.referenceCount == 0 { + am.decrementNewAddressCount(subnetworkID) + delete(am.addressIndex, addressKey) + } + } +} + +// pickTried selects an address from the tried bucket to be evicted. +// We just choose the eldest. 
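expireNew above uses a two-pass policy: delete outright bad entries first, and only fall back to the oldest timestamp when nothing is bad. Distilled below, with isBad (from knownaddress.go) stubbed as a flag; note the real code removes every bad entry it encounters, while this sketch returns the first one:

```go
package main

import (
	"fmt"
	"time"
)

type entry struct {
	key      string
	lastSeen time.Time
	bad      bool // stand-in for KnownAddress.isBad()
}

// pickEviction mirrors expireNew: evict a bad entry as soon as one is
// found, otherwise evict the entry with the oldest timestamp.
func pickEviction(bucket []entry) string {
	var oldest *entry
	for i := range bucket {
		if bucket[i].bad {
			return bucket[i].key
		}
		if oldest == nil || !bucket[i].lastSeen.After(oldest.lastSeen) {
			oldest = &bucket[i]
		}
	}
	if oldest == nil {
		return ""
	}
	return oldest.key
}

func main() {
	now := time.Now()
	bucket := []entry{
		{"a", now.Add(-time.Hour), false},
		{"b", now.Add(-48 * time.Hour), false},
		{"c", now, false},
	}
	fmt.Println(pickEviction(bucket)) // "b": the oldest wins when nothing is bad
}
```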
+func (am *AddressManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucketIndex int) ( + knownAddress *KnownAddress, knownAddressIndex int) { + + var oldest *KnownAddress + oldestIndex := -1 + triedAddressBucketArray := am.triedAddressBucketArray(subnetworkID) + for i, address := range triedAddressBucketArray[bucketIndex] { + if oldest == nil || oldest.netAddress.Timestamp.After(address.netAddress.Timestamp) { + oldestIndex = i + oldest = address + } + } + return oldest, oldestIndex +} + +func (am *AddressManager) newAddressBucketIndex(netAddress, srcAddress *wire.NetAddress) int { + // doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckets + + data1 := []byte{} + data1 = append(data1, am.key[:]...) + data1 = append(data1, []byte(am.GroupKey(netAddress))...) + data1 = append(data1, []byte(am.GroupKey(srcAddress))...) + hash1 := daghash.DoubleHashB(data1) + hash64 := binary.LittleEndian.Uint64(hash1) + hash64 %= newBucketsPerGroup + var hashbuf [8]byte + binary.LittleEndian.PutUint64(hashbuf[:], hash64) + data2 := []byte{} + data2 = append(data2, am.key[:]...) + data2 = append(data2, am.GroupKey(srcAddress)...) + data2 = append(data2, hashbuf[:]...) + + hash2 := daghash.DoubleHashB(data2) + return int(binary.LittleEndian.Uint64(hash2) % NewBucketCount) +} + +func (am *AddressManager) triedAddressBucketIndex(netAddress *wire.NetAddress) int { + // doublesha256(key + group + truncate_to_64bits(doublesha256(key)) % buckets_per_group) % num_buckets + data1 := []byte{} + data1 = append(data1, am.key[:]...) + data1 = append(data1, []byte(NetAddressKey(netAddress))...) + hash1 := daghash.DoubleHashB(data1) + hash64 := binary.LittleEndian.Uint64(hash1) + hash64 %= triedBucketsPerGroup + var hashbuf [8]byte + binary.LittleEndian.PutUint64(hashbuf[:], hash64) + data2 := []byte{} + data2 = append(data2, am.key[:]...) + data2 = append(data2, am.GroupKey(netAddress)...) + data2 = append(data2, hashbuf[:]...) + + hash2 := daghash.DoubleHashB(data2) + return int(binary.LittleEndian.Uint64(hash2) % TriedBucketCount) +} + +// addressHandler is the main handler for the address manager. It must be run +// as a goroutine. +func (am *AddressManager) addressHandler() { + dumpAddressTicker := time.NewTicker(dumpAddressInterval) + defer dumpAddressTicker.Stop() + +out: + for { + select { + case <-dumpAddressTicker.C: + err := am.savePeers() + if err != nil { + panic(errors.Wrap(err, "error saving peers")) + } + + case <-am.quit: + break out + } + } + err := am.savePeers() + if err != nil { + panic(errors.Wrap(err, "error saving peers")) + } + am.wg.Done() + log.Trace("Address handler done") +} + +// savePeers saves all the known addresses to the database so they can be read back +// in at next run. +func (am *AddressManager) savePeers() error { + serializedPeersState, err := am.serializePeersState() + if err != nil { + return err + } + + return dbaccess.StorePeersState(am.databaseContext, serializedPeersState) +} + +func (am *AddressManager) serializePeersState() ([]byte, error) { + peersState, err := am.PeersStateForSerialization() + if err != nil { + return nil, err + } + + buffer := &bytes.Buffer{} + encoder := gob.NewEncoder(buffer) + err = encoder.Encode(&peersState) + if err != nil { + return nil, errors.Wrap(err, "failed to encode peers state") + } + + return buffer.Bytes(), nil +} + +// PeersStateForSerialization returns the data model that is used to serialize the peers state to any encoding. 
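On the bucket hashing above: assuming daghash.DoubleHashB is SHA-256 applied twice (as in its btcd ancestor), triedAddressBucketIndex reduces to the following standalone sketch; the address and group strings are illustrative inputs, not real GroupKey output:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const (
	triedBucketsPerGroup = 8
	triedBucketCount     = 64
)

// doubleHashB is assumed to match daghash.DoubleHashB: SHA-256 applied twice.
func doubleHashB(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// triedBucketIndex mirrors triedAddressBucketIndex: an inner hash of
// key+addressKey is reduced mod triedBucketsPerGroup, then mixed with
// key+groupKey in an outer hash reduced mod the bucket count.
func triedBucketIndex(key [32]byte, addressKey, groupKey string) int {
	hash1 := doubleHashB(append(key[:], addressKey...))
	hash64 := binary.LittleEndian.Uint64(hash1) % triedBucketsPerGroup

	var hashbuf [8]byte
	binary.LittleEndian.PutUint64(hashbuf[:], hash64)

	data2 := append(key[:], groupKey...)
	data2 = append(data2, hashbuf[:]...)
	return int(binary.LittleEndian.Uint64(doubleHashB(data2)) % triedBucketCount)
}

func main() {
	var key [32]byte // the real key is random; zero is fine for a demo
	fmt.Println(triedBucketIndex(key, "203.0.113.5:16111", "203.0.0.0/16"))
}
```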
+func (am *AddressManager) PeersStateForSerialization() (*PeersStateForSerialization, error) { + am.mutex.Lock() + defer am.mutex.Unlock() + + // First we make a serializable data structure so we can encode it to + // gob. + peersState := new(PeersStateForSerialization) + peersState.Version = serializationVersion + copy(peersState.Key[:], am.key[:]) + + peersState.Addresses = make([]*serializedKnownAddress, len(am.addressIndex)) + i := 0 + for addressKey, knownAddress := range am.addressIndex { + serializedAddress := new(serializedKnownAddress) + serializedAddress.Address = addressKey + if knownAddress.subnetworkID == nil { + serializedAddress.SubnetworkID = "" + } else { + serializedAddress.SubnetworkID = knownAddress.subnetworkID.String() + } + serializedAddress.TimeStamp = knownAddress.netAddress.Timestamp.UnixMilliseconds() + serializedAddress.SourceAddress = NetAddressKey(knownAddress.sourceAddress) + serializedAddress.Attempts = knownAddress.attempts + serializedAddress.LastAttempt = knownAddress.lastAttempt.UnixMilliseconds() + serializedAddress.LastSuccess = knownAddress.lastSuccess.UnixMilliseconds() + // Tried and referenceCount are implicit in the rest of the structure + // and will be worked out from context on unserialisation. + peersState.Addresses[i] = serializedAddress + i++ + } + + peersState.SubnetworkNewAddressBucketArrays = make(map[string]*serializedNewAddressBucketArray) + for subnetworkID := range am.subnetworkNewAddressBucketArrays { + subnetworkIDStr := subnetworkID.String() + peersState.SubnetworkNewAddressBucketArrays[subnetworkIDStr] = &serializedNewAddressBucketArray{} + + for i := range am.subnetworkNewAddressBucketArrays[subnetworkID] { + peersState.SubnetworkNewAddressBucketArrays[subnetworkIDStr][i] = make([]AddressKey, len(am.subnetworkNewAddressBucketArrays[subnetworkID][i])) + j := 0 + for k := range am.subnetworkNewAddressBucketArrays[subnetworkID][i] { + peersState.SubnetworkNewAddressBucketArrays[subnetworkIDStr][i][j] = k + j++ + } + } + } + + for i := range am.fullNodeNewAddressBucketArray { + peersState.FullNodeNewAddressBucketArray[i] = make([]AddressKey, len(am.fullNodeNewAddressBucketArray[i])) + j := 0 + for k := range am.fullNodeNewAddressBucketArray[i] { + peersState.FullNodeNewAddressBucketArray[i][j] = k + j++ + } + } + + peersState.SubnetworkTriedAddressBucketArrays = make(map[string]*serializedTriedAddressBucketArray) + for subnetworkID := range am.subnetworkTriedAddresBucketArrays { + subnetworkIDStr := subnetworkID.String() + peersState.SubnetworkTriedAddressBucketArrays[subnetworkIDStr] = &serializedTriedAddressBucketArray{} + + for i := range am.subnetworkTriedAddresBucketArrays[subnetworkID] { + peersState.SubnetworkTriedAddressBucketArrays[subnetworkIDStr][i] = make([]AddressKey, len(am.subnetworkTriedAddresBucketArrays[subnetworkID][i])) + j := 0 + for _, knownAddress := range am.subnetworkTriedAddresBucketArrays[subnetworkID][i] { + peersState.SubnetworkTriedAddressBucketArrays[subnetworkIDStr][i][j] = NetAddressKey(knownAddress.netAddress) + j++ + } + } + } + + for i := range am.fullNodeTriedAddressBucketArray { + peersState.FullNodeTriedAddressBucketArray[i] = make([]AddressKey, len(am.fullNodeTriedAddressBucketArray[i])) + j := 0 + for _, knownAddress := range am.fullNodeTriedAddressBucketArray[i] { + peersState.FullNodeTriedAddressBucketArray[i][j] = NetAddressKey(knownAddress.netAddress) + j++ + } + } + + return peersState, nil +} + +// loadPeers loads the known address from the database. 
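The method above, plus serializePeersState and deserializePeersState, amounts to a gob round trip with a version check. The same pattern on a toy model:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

const serializationVersion = 1

type toyState struct {
	Version   int
	Addresses []string
}

func serialize(state *toyState) ([]byte, error) {
	buffer := &bytes.Buffer{}
	if err := gob.NewEncoder(buffer).Encode(state); err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}

func deserialize(serialized []byte) (*toyState, error) {
	state := &toyState{}
	if err := gob.NewDecoder(bytes.NewBuffer(serialized)).Decode(state); err != nil {
		return nil, err
	}
	if state.Version != serializationVersion {
		return nil, fmt.Errorf("unknown version %d in serialized peers state", state.Version)
	}
	return state, nil
}

func main() {
	serialized, _ := serialize(&toyState{Version: 1, Addresses: []string{"203.0.113.5:16111"}})
	state, err := deserialize(serialized)
	fmt.Println(state, err)
}
```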
If missing, +// just don't load anything and start fresh. +func (am *AddressManager) loadPeers() error { + am.mutex.Lock() + defer am.mutex.Unlock() + + serializedPeerState, err := dbaccess.FetchPeersState(am.databaseContext) + if dbaccess.IsNotFoundError(err) { + am.reset() + log.Info("No peers state was found in the database. Created a new one", am.totalNumAddresses()) + return nil + } + if err != nil { + return err + } + + err = am.deserializePeersState(serializedPeerState) + if err != nil { + return err + } + + log.Infof("Loaded %d addresses from database", am.totalNumAddresses()) + return nil +} + +func (am *AddressManager) deserializePeersState(serializedPeerState []byte) error { + var peersState PeersStateForSerialization + r := bytes.NewBuffer(serializedPeerState) + dec := gob.NewDecoder(r) + err := dec.Decode(&peersState) + if err != nil { + return errors.Wrap(err, "error deserializing peers state") + } + + if peersState.Version != serializationVersion { + return errors.Errorf("unknown version %d in serialized "+ + "peers state", peersState.Version) + } + copy(am.key[:], peersState.Key[:]) + + for _, serializedKnownAddress := range peersState.Addresses { + knownAddress := new(KnownAddress) + knownAddress.netAddress, err = am.DeserializeNetAddress(serializedKnownAddress.Address) + if err != nil { + return errors.Errorf("failed to deserialize netaddress "+ + "%s: %s", serializedKnownAddress.Address, err) + } + knownAddress.sourceAddress, err = am.DeserializeNetAddress(serializedKnownAddress.SourceAddress) + if err != nil { + return errors.Errorf("failed to deserialize netaddress "+ + "%s: %s", serializedKnownAddress.SourceAddress, err) + } + if serializedKnownAddress.SubnetworkID != "" { + knownAddress.subnetworkID, err = subnetworkid.NewFromStr(serializedKnownAddress.SubnetworkID) + if err != nil { + return errors.Errorf("failed to deserialize subnetwork id "+ + "%s: %s", serializedKnownAddress.SubnetworkID, err) + } + } + knownAddress.attempts = serializedKnownAddress.Attempts + knownAddress.lastAttempt = mstime.UnixMilliseconds(serializedKnownAddress.LastAttempt) + knownAddress.lastSuccess = mstime.UnixMilliseconds(serializedKnownAddress.LastSuccess) + am.addressIndex[NetAddressKey(knownAddress.netAddress)] = knownAddress + } + + for subnetworkIDStr := range peersState.SubnetworkNewAddressBucketArrays { + subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) + if err != nil { + return err + } + for i, subnetworkNewAddressBucket := range peersState.SubnetworkNewAddressBucketArrays[subnetworkIDStr] { + for _, addressKey := range subnetworkNewAddressBucket { + knownAddress, ok := am.addressIndex[addressKey] + if !ok { + return errors.Errorf("newbucket contains %s but "+ + "none in address list", addressKey) + } + + if knownAddress.referenceCount == 0 { + am.subnetworkNewAddressCounts[*subnetworkID]++ + } + knownAddress.referenceCount++ + am.updateAddrNew(i, addressKey, knownAddress) + } + } + } + + for i, fullNodeNewAddressBucket := range peersState.FullNodeNewAddressBucketArray { + for _, addressKey := range fullNodeNewAddressBucket { + knownAddress, ok := am.addressIndex[addressKey] + if !ok { + return errors.Errorf("full nodes newbucket contains %s but "+ + "none in address list", addressKey) + } + + if knownAddress.referenceCount == 0 { + am.fullNodeNewAddressCount++ + } + knownAddress.referenceCount++ + am.updateAddrNew(i, addressKey, knownAddress) + } + } + + for subnetworkIDString := range peersState.SubnetworkTriedAddressBucketArrays { + subnetworkID, err := 
subnetworkid.NewFromStr(subnetworkIDString) + if err != nil { + return err + } + for i, subnetworkTriedAddressBucket := range peersState.SubnetworkTriedAddressBucketArrays[subnetworkIDString] { + for _, addressKey := range subnetworkTriedAddressBucket { + knownAddress, ok := am.addressIndex[addressKey] + if !ok { + return errors.Errorf("Tried bucket contains %s but "+ + "none in address list", addressKey) + } + + knownAddress.tried = true + am.subnetworkTriedAddressCounts[*subnetworkID]++ + am.subnetworkTriedAddresBucketArrays[*subnetworkID][i] = append(am.subnetworkTriedAddresBucketArrays[*subnetworkID][i], knownAddress) + } + } + } + + for i, fullNodeTriedAddressBucket := range peersState.FullNodeTriedAddressBucketArray { + for _, addressKey := range fullNodeTriedAddressBucket { + knownAddress, ok := am.addressIndex[addressKey] + if !ok { + return errors.Errorf("Full nodes tried bucket contains %s but "+ + "none in address list", addressKey) + } + + knownAddress.tried = true + am.fullNodeTriedAddressCount++ + am.fullNodeTriedAddressBucketArray[i] = append(am.fullNodeTriedAddressBucketArray[i], knownAddress) + } + } + + // Sanity checking. + for addressKey, knownAddress := range am.addressIndex { + if knownAddress.referenceCount == 0 && !knownAddress.tried { + return errors.Errorf("address %s after serialisation "+ + "with no references", addressKey) + } + + if knownAddress.referenceCount > 0 && knownAddress.tried { + return errors.Errorf("address %s after serialisation "+ + "which is both new and tried!", addressKey) + } + } + + return nil +} + +// DeserializeNetAddress converts a given address string to a *wire.NetAddress +func (am *AddressManager) DeserializeNetAddress(addressKey AddressKey) (*wire.NetAddress, error) { + host, portString, err := net.SplitHostPort(string(addressKey)) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + + return am.HostToNetAddress(host, uint16(port), wire.SFNodeNetwork) +} + +// Start begins the core address handler which manages a pool of known +// addresses, timeouts, and interval based writes. +func (am *AddressManager) Start() error { + // Already started? + if atomic.AddInt32(&am.started, 1) != 1 { + return nil + } + + log.Trace("Starting address manager") + + // Load peers we already know about from the database. + err := am.loadPeers() + if err != nil { + return err + } + + // Start the address ticker to save addresses periodically. + am.wg.Add(1) + spawn("addressManager.addressHandler", am.addressHandler) + return nil +} + +// Stop gracefully shuts down the address manager by stopping the main handler. +func (am *AddressManager) Stop() error { + if atomic.AddInt32(&am.shutdown, 1) != 1 { + log.Warnf("Address manager is already in the process of " + + "shutting down") + return nil + } + + log.Infof("Address manager shutting down") + close(am.quit) + am.wg.Wait() + return nil +} + +// AddAddresses adds new addresses to the address manager. It enforces a max +// number of addresses and silently ignores duplicate addresses. It is +// safe for concurrent access. +func (am *AddressManager) AddAddresses(addresses []*wire.NetAddress, sourceAddress *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { + am.mutex.Lock() + defer am.mutex.Unlock() + + for _, address := range addresses { + am.updateAddress(address, sourceAddress, subnetworkID) + } +} + +// AddAddress adds a new address to the address manager. 
It enforces a max
+// number of addresses and silently ignores duplicate addresses. It is
+// safe for concurrent access.
+func (am *AddressManager) AddAddress(address, sourceAddress *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
+	am.mutex.Lock()
+	defer am.mutex.Unlock()
+
+	am.updateAddress(address, sourceAddress, subnetworkID)
+}
+
+// AddAddressByIP adds an address where we are given an ip:port and not a
+// wire.NetAddress.
+func (am *AddressManager) AddAddressByIP(addressIP string, subnetworkID *subnetworkid.SubnetworkID) error {
+	// Split IP and port
+	ipString, portString, err := net.SplitHostPort(addressIP)
+	if err != nil {
+		return err
+	}
+	// Put it in a wire.NetAddress
+	ip := net.ParseIP(ipString)
+	if ip == nil {
+		return errors.Errorf("invalid ip %s", ipString)
+	}
+	port, err := strconv.ParseUint(portString, 10, 16)
+	if err != nil {
+		return errors.Errorf("invalid port %s: %s", portString, err)
+	}
+	netAddress := wire.NewNetAddressIPPort(ip, uint16(port), 0)
+	am.AddAddress(netAddress, netAddress, subnetworkID) // XXX use correct src address
+	return nil
+}
+
+// numAddresses returns the number of addresses that belong to a specific
+// subnetwork ID and are known to the address manager.
+func (am *AddressManager) numAddresses(subnetworkID *subnetworkid.SubnetworkID) int {
+	if subnetworkID == nil {
+		return am.fullNodeNewAddressCount + am.fullNodeTriedAddressCount
+	}
+	return am.subnetworkTriedAddressCounts[*subnetworkID] + am.subnetworkNewAddressCounts[*subnetworkID]
+}
+
+// totalNumAddresses returns the number of addresses known to the address manager.
+func (am *AddressManager) totalNumAddresses() int {
+	total := am.fullNodeNewAddressCount + am.fullNodeTriedAddressCount
+	for _, numAddresses := range am.subnetworkTriedAddressCounts {
+		total += numAddresses
+	}
+	for _, numAddresses := range am.subnetworkNewAddressCounts {
+		total += numAddresses
+	}
+	return total
+}
+
+// TotalNumAddresses returns the number of addresses known to the address manager.
+func (am *AddressManager) TotalNumAddresses() int {
+	am.mutex.Lock()
+	defer am.mutex.Unlock()
+
+	return am.totalNumAddresses()
+}
+
+// NeedMoreAddresses returns whether or not the address manager needs more
+// addresses.
+func (am *AddressManager) NeedMoreAddresses() bool {
+	am.mutex.Lock()
+	defer am.mutex.Unlock()
+
+	allAddresses := am.numAddresses(am.localSubnetworkID)
+	if am.localSubnetworkID != nil {
+		allAddresses += am.numAddresses(nil)
+	}
+	return allAddresses < needAddressThreshold
+}
+
+// AddressCache returns the current address cache. It must be treated as
+// read-only (but since it is a copy now, this is not as dangerous).
+func (am *AddressManager) AddressCache(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) []*wire.NetAddress {
+	am.mutex.Lock()
+	defer am.mutex.Unlock()
+
+	if len(am.addressIndex) == 0 {
+		return nil
+	}
+
+	allAddresses := []*wire.NetAddress{}
+	// Iteration order is undefined here, but we randomise it anyway.
+	for _, v := range am.addressIndex {
+		if includeAllSubnetworks || v.SubnetworkID().IsEqual(subnetworkID) {
+			allAddresses = append(allAddresses, v.netAddress)
+		}
+	}
+
+	numAddresses := len(allAddresses) * getAddrPercent / 100
+	if numAddresses > GetAddressesMax {
+		numAddresses = GetAddressesMax
+	}
+	if len(allAddresses) < getAddrMin {
+		numAddresses = len(allAddresses)
+	}
+	if len(allAddresses) > getAddrMin && numAddresses < getAddrMin {
+		numAddresses = getAddrMin
+	}
+
+	// Fisher-Yates shuffle the array.
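On the size math just computed: share getAddrPercent (23%) of what is known, cap it at GetAddressesMax (2500), and never drop below getAddrMin (50) unless fewer than that are known. Isolated and runnable:

```go
package main

import "fmt"

const (
	getAddrMin      = 50
	getAddressesMax = 2500
	getAddrPercent  = 23
)

// cacheSize reproduces the arithmetic in AddressCache.
func cacheSize(totalAddresses int) int {
	numAddresses := totalAddresses * getAddrPercent / 100
	if numAddresses > getAddressesMax {
		numAddresses = getAddressesMax
	}
	if totalAddresses < getAddrMin {
		numAddresses = totalAddresses // fewer than the minimum: send everything
	}
	if totalAddresses > getAddrMin && numAddresses < getAddrMin {
		numAddresses = getAddrMin // enough known: always share at least the minimum
	}
	return numAddresses
}

func main() {
	for _, total := range []int{10, 100, 1000, 20000} {
		fmt.Printf("%5d known -> share %4d\n", total, cacheSize(total))
	}
}
```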
We only need to do the first + // `numAddresses' since we are throwing the rest. + for i := 0; i < numAddresses; i++ { + // pick a number between current index and the end + j := rand.Intn(len(allAddresses)-i) + i + allAddresses[i], allAddresses[j] = allAddresses[j], allAddresses[i] + } + + // slice off the limit we are willing to share. + return allAddresses[0:numAddresses] +} + +// reset resets the address manager by reinitialising the random source +// and allocating fresh empty bucket storage. +func (am *AddressManager) reset() { + am.addressIndex = make(map[AddressKey]*KnownAddress) + + // fill key with bytes from a good random source. + io.ReadFull(crand.Reader, am.key[:]) + am.subnetworkNewAddressBucketArrays = make(map[subnetworkid.SubnetworkID]*newAddressBucketArray) + am.subnetworkTriedAddresBucketArrays = make(map[subnetworkid.SubnetworkID]*triedAddressBucketArray) + + am.subnetworkNewAddressCounts = make(map[subnetworkid.SubnetworkID]int) + am.subnetworkTriedAddressCounts = make(map[subnetworkid.SubnetworkID]int) + + for i := range am.fullNodeNewAddressBucketArray { + am.fullNodeNewAddressBucketArray[i] = make(map[AddressKey]*KnownAddress) + } + for i := range am.fullNodeTriedAddressBucketArray { + am.fullNodeTriedAddressBucketArray[i] = nil + } + am.fullNodeNewAddressCount = 0 + am.fullNodeTriedAddressCount = 0 +} + +// HostToNetAddress returns a netaddress given a host address. If +// the host is not an IP address it will be resolved. +func (am *AddressManager) HostToNetAddress(host string, port uint16, services wire.ServiceFlag) (*wire.NetAddress, error) { + ip := net.ParseIP(host) + if ip == nil { + ips, err := am.lookupFunc(host) + if err != nil { + return nil, err + } + if len(ips) == 0 { + return nil, errors.Errorf("no addresses found for %s", host) + } + ip = ips[0] + } + + return wire.NewNetAddressIPPort(ip, port, services), nil +} + +// NetAddressKey returns a key in the form of ip:port for IPv4 addresses +// or [ip]:port for IPv6 addresses for use as keys in maps. +func NetAddressKey(netAddress *wire.NetAddress) AddressKey { + port := strconv.FormatUint(uint64(netAddress.Port), 10) + + return AddressKey(net.JoinHostPort(netAddress.IP.String(), port)) +} + +// GetAddress returns a single address that should be routable. It picks a +// random one from the possible addresses with preference given to ones that +// have not been used recently and should not pick 'close' addresses +// consecutively. +func (am *AddressManager) GetAddress() *KnownAddress { + // Protect concurrent access. + am.mutex.Lock() + defer am.mutex.Unlock() + + triedAddressBucketArray := am.triedAddressBucketArray(am.localSubnetworkID) + triedAddressCount := am.triedAddressCount(am.localSubnetworkID) + newAddressBucketArray := am.newAddressBucketArray(am.localSubnetworkID) + newAddressCount := am.newAddressCount(am.localSubnetworkID) + knownAddress := am.getAddress(triedAddressBucketArray, triedAddressCount, newAddressBucketArray, newAddressCount) + + return knownAddress + +} + +// getAddress returns a single address that should be routable. +// See GetAddress for further details. +func (am *AddressManager) getAddress(triedAddressBucketArray *triedAddressBucketArray, triedAddressCount int, + newAddressBucketArray *newAddressBucketArray, newAddressCount int) *KnownAddress { + + // Use a 50% chance for choosing between tried and new addresses. 
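Continuing from the 50% comment above: after flipping between the tried and new arrays, getAddress does a roulette-wheel draw weighted by each entry's chance(). The draw on its own, with hard-coded weights standing in for chance():

```go
package main

import (
	"fmt"
	"math/rand"
)

// weightedPick mirrors the selection loop in getAddress: normalize each
// weight by the bucket total and walk the cumulative distribution until
// it passes a uniform random value.
func weightedPick(random *rand.Rand, keys []string, chances []float64) string {
	totalChance := 0.0
	for _, chance := range chances {
		totalChance += chance
	}

	randomValue := random.Float64()
	accumulatedChance := 0.0
	for i, chance := range chances {
		accumulatedChance += chance / totalChance
		if randomValue < accumulatedChance {
			return keys[i]
		}
	}
	// Floating-point rounding can leave the accumulated total a hair
	// under 1.0; falling back to the last entry is one way to stay safe
	// (the diff panics at this point instead).
	return keys[len(keys)-1]
}

func main() {
	random := rand.New(rand.NewSource(1))
	counts := map[string]int{}
	for i := 0; i < 10000; i++ {
		counts[weightedPick(random, []string{"fresh", "stale"}, []float64{1.0, 0.01})]++
	}
	fmt.Println(counts) // "fresh" dominates by roughly 100:1
}
```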
+ var bucketArray addressBucketArray + if triedAddressCount > 0 && (newAddressCount == 0 || am.random.Intn(2) == 0) { + bucketArray = triedAddressBucketArray + } else if newAddressCount > 0 { + bucketArray = newAddressBucketArray + } else { + // There aren't any addresses in any of the buckets + return nil + } + + // Pick a random bucket + randomBucket := bucketArray.randomBucket(am.random) + + // Get the sum of all chances + totalChance := float64(0) + for _, knownAddress := range randomBucket { + totalChance += knownAddress.chance() + } + + // Pick a random address weighted by chance + randomValue := am.random.Float64() + accumulatedChance := float64(0) + for _, knownAddress := range randomBucket { + normalizedChance := knownAddress.chance() / totalChance + accumulatedChance += normalizedChance + if randomValue < accumulatedChance { + return knownAddress + } + } + + panic("randomValue is equal to or greater than 1, which cannot happen") +} + +type addressBucketArray interface { + name() string + randomBucket(random *rand.Rand) []*KnownAddress +} + +func (nb *newAddressBucketArray) randomBucket(random *rand.Rand) []*KnownAddress { + nonEmptyBuckets := make([]map[AddressKey]*KnownAddress, 0, NewBucketCount) + for _, bucket := range nb { + if len(bucket) > 0 { + nonEmptyBuckets = append(nonEmptyBuckets, bucket) + } + } + randomIndex := random.Intn(len(nonEmptyBuckets)) + randomBucket := nonEmptyBuckets[randomIndex] + + // Collect the known addresses into a slice + randomBucketSlice := make([]*KnownAddress, 0, len(randomBucket)) + for _, knownAddress := range randomBucket { + randomBucketSlice = append(randomBucketSlice, knownAddress) + } + return randomBucketSlice +} + +func (nb *newAddressBucketArray) name() string { + return "new" +} + +func (tb *triedAddressBucketArray) randomBucket(random *rand.Rand) []*KnownAddress { + nonEmptyBuckets := make([][]*KnownAddress, 0, TriedBucketCount) + for _, bucket := range tb { + if len(bucket) > 0 { + nonEmptyBuckets = append(nonEmptyBuckets, bucket) + } + } + randomIndex := random.Intn(len(nonEmptyBuckets)) + return nonEmptyBuckets[randomIndex] +} + +func (tb *triedAddressBucketArray) name() string { + return "tried" +} + +func (am *AddressManager) knownAddress(address *wire.NetAddress) *KnownAddress { + return am.addressIndex[NetAddressKey(address)] +} + +// Attempt increases the given address' attempt counter and updates +// the last attempt time. +func (am *AddressManager) Attempt(address *wire.NetAddress) { + am.mutex.Lock() + defer am.mutex.Unlock() + + // find address. + // Surely address will be in tried by now? + knownAddress := am.knownAddress(address) + if knownAddress == nil { + return + } + // set last tried time to now + knownAddress.attempts++ + knownAddress.lastAttempt = mstime.Now() +} + +// Connected Marks the given address as currently connected and working at the +// current time. The address must already be known to AddressManager else it will +// be ignored. +func (am *AddressManager) Connected(address *wire.NetAddress) { + am.mutex.Lock() + defer am.mutex.Unlock() + + knownAddress := am.knownAddress(address) + if knownAddress == nil { + return + } + + // Update the time as long as it has been 20 minutes since last we did + // so. + now := mstime.Now() + if now.After(knownAddress.netAddress.Timestamp.Add(time.Minute * 20)) { + // knownAddress.netAddress is immutable, so replace it. 
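The copy-on-write idiom flagged in the comment above (and used in updateAddress earlier) keeps *wire.NetAddress values immutable: writers swap the pointer, so a reader holding the old pointer keeps a consistent snapshot. A distilled illustration (the real code relies on the manager's mutex, not atomics; the atomic pointer here just makes the snapshot property explicit):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type netAddressLike struct {
	host      string
	timestamp time.Time
}

// update replaces the pointer instead of mutating the value, so readers
// holding the old pointer never observe a half-updated address.
func update(current *atomic.Pointer[netAddressLike], timestamp time.Time) {
	addressCopy := *current.Load()
	addressCopy.timestamp = timestamp
	current.Store(&addressCopy)
}

func main() {
	var current atomic.Pointer[netAddressLike]
	current.Store(&netAddressLike{host: "203.0.113.5", timestamp: time.Unix(0, 0)})

	snapshot := current.Load()
	update(&current, time.Now())
	fmt.Println(snapshot.timestamp, current.Load().timestamp) // old snapshot is unchanged
}
```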
+		netAddressCopy := *knownAddress.netAddress
+		netAddressCopy.Timestamp = mstime.Now()
+		knownAddress.netAddress = &netAddressCopy
+	}
+}
+
+// Good marks the given address as good. To be called after a successful
+// connection and version exchange. If the address is unknown to the address
+// manager it will be ignored.
+func (am *AddressManager) Good(address *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) {
+	am.mutex.Lock()
+	defer am.mutex.Unlock()
+
+	knownAddress := am.knownAddress(address)
+	if knownAddress == nil {
+		return
+	}
+	oldSubnetworkID := knownAddress.subnetworkID
+
+	// knownAddress.Timestamp is not updated here to avoid leaking information
+	// about currently connected peers.
+	now := mstime.Now()
+	knownAddress.lastSuccess = now
+	knownAddress.lastAttempt = now
+	knownAddress.attempts = 0
+	knownAddress.subnetworkID = subnetworkID
+
+	addressKey := NetAddressKey(address)
+	triedAddressBucketIndex := am.triedAddressBucketIndex(knownAddress.netAddress)
+
+	if knownAddress.tried {
+		// If this address was already tried and its subnetworkID didn't change,
+		// there is nothing to do.
+		if subnetworkID.IsEqual(oldSubnetworkID) {
+			return
+		}
+
+		// If this address was already tried but its subnetworkID changed,
+		// remove it from the old tried bucket, then continue as though this
+		// is a new address.
+		bucket := am.subnetworkTriedAddresBucketArrays[*oldSubnetworkID][triedAddressBucketIndex]
+		var toRemoveIndex int
+		toRemoveIndexFound := false
+		for i, knownAddress := range bucket {
+			if NetAddressKey(knownAddress.NetAddress()) == addressKey {
+				toRemoveIndex = i
+				toRemoveIndexFound = true
+				break
+			}
+		}
+		if toRemoveIndexFound {
+			am.subnetworkTriedAddresBucketArrays[*oldSubnetworkID][triedAddressBucketIndex] =
+				append(bucket[:toRemoveIndex], bucket[toRemoveIndex+1:]...)
+		}
+	}
+
+	// Ok, need to move it to tried.
+
+	// Remove from all new buckets.
+	// Record one of the buckets in question and call it the `oldBucketIndex'
+	var oldBucketIndex int
+	oldBucketIndexFound := false
+	if !knownAddress.tried {
+		newAddressBucketArray := am.newAddressBucketArray(oldSubnetworkID)
+		for i := range newAddressBucketArray {
+			// we check for existence so we can record the first one
+			if _, ok := newAddressBucketArray[i][addressKey]; ok {
+				if !oldBucketIndexFound {
+					oldBucketIndex = i
+					oldBucketIndexFound = true
+				}
+
+				delete(newAddressBucketArray[i], addressKey)
+				knownAddress.referenceCount--
+			}
+		}
+
+		am.decrementNewAddressCount(oldSubnetworkID)
+	}
+
+	// Room in this tried bucket?
+	triedAddressBucketArray := am.triedAddressBucketArray(knownAddress.subnetworkID)
+	triedAddressCount := am.triedAddressCount(knownAddress.subnetworkID)
+	if triedAddressCount == 0 || len(triedAddressBucketArray[triedAddressBucketIndex]) < triedBucketSize {
+		knownAddress.tried = true
+		am.updateAddrTried(triedAddressBucketIndex, knownAddress)
+		am.incrementTriedAddressCount(knownAddress.subnetworkID)
+		return
+	}
+
+	// No room, we have to evict something else.
+	knownAddressToRemove, knownAddressToRemoveIndex := am.pickTried(knownAddress.subnetworkID, triedAddressBucketIndex)
+
+	// First bucket index it would have been put in.
+	newAddressBucketIndex := am.newAddressBucketIndex(knownAddressToRemove.netAddress, knownAddressToRemove.sourceAddress)
+
+	// If no room in the original bucket, we put it in a bucket we just
+	// freed up a space in.
+	newAddressBucketArray := am.newAddressBucketArray(knownAddress.subnetworkID)
+	if len(newAddressBucketArray[newAddressBucketIndex]) >= newBucketSize {
+		if !oldBucketIndexFound {
+			// If the address came from a tried bucket with an updated
+			// subnetworkID, no old new-bucket index was recorded
+			// (oldBucketIndexFound is false). In that case find some
+			// non-full bucket; if every bucket is full, fall back to the
+			// original (full) bucket.
+			for newBucket := range newAddressBucketArray {
+				if len(newAddressBucketArray[newBucket]) < newBucketSize {
+					newAddressBucketIndex = newBucket
+					break
+				}
+			}
+		} else {
+			newAddressBucketIndex = oldBucketIndex
+		}
+	}
+
+	// Replace the evicted address with knownAddress in the tried slice.
+	knownAddress.tried = true
+	triedAddressBucketArray[triedAddressBucketIndex][knownAddressToRemoveIndex] = knownAddress
+
+	knownAddressToRemove.tried = false
+	knownAddressToRemove.referenceCount++
+
+	// We don't touch am.subnetworkTriedAddressCounts here since the number of tried stays the same
+	// but we decremented new above, raise it again since we're putting
+	// something back.
+	am.incrementNewAddressCount(knownAddress.subnetworkID)
+
+	knownAddressToRemoveKey := NetAddressKey(knownAddressToRemove.netAddress)
+	log.Tracef("Replacing %s with %s in tried", knownAddressToRemoveKey, addressKey)
+
+	// We either made room here just above or fell back to an already-full bucket.
+	newAddressBucketArray[newAddressBucketIndex][knownAddressToRemoveKey] = knownAddressToRemove
+}
+
+func (am *AddressManager) newAddressBucketArray(subnetworkID *subnetworkid.SubnetworkID) *newAddressBucketArray {
+	if subnetworkID == nil {
+		return &am.fullNodeNewAddressBucketArray
+	}
+	return am.subnetworkNewAddressBucketArrays[*subnetworkID]
+}
+
+func (am *AddressManager) triedAddressBucketArray(subnetworkID *subnetworkid.SubnetworkID) *triedAddressBucketArray {
+	if subnetworkID == nil {
+		return &am.fullNodeTriedAddressBucketArray
+	}
+	return am.subnetworkTriedAddresBucketArrays[*subnetworkID]
+}
+
+func (am *AddressManager) incrementNewAddressCount(subnetworkID *subnetworkid.SubnetworkID) {
+	if subnetworkID == nil {
+		am.fullNodeNewAddressCount++
+		return
+	}
+	am.subnetworkNewAddressCounts[*subnetworkID]++
+}
+
+func (am *AddressManager) decrementNewAddressCount(subnetworkID *subnetworkid.SubnetworkID) {
+	if subnetworkID == nil {
+		am.fullNodeNewAddressCount--
+		return
+	}
+	am.subnetworkNewAddressCounts[*subnetworkID]--
+}
+
+func (am *AddressManager) triedAddressCount(subnetworkID *subnetworkid.SubnetworkID) int {
+	if subnetworkID == nil {
+		return am.fullNodeTriedAddressCount
+	}
+	return am.subnetworkTriedAddressCounts[*subnetworkID]
+}
+
+func (am *AddressManager) newAddressCount(subnetworkID *subnetworkid.SubnetworkID) int {
+	if subnetworkID == nil {
+		return am.fullNodeNewAddressCount
+	}
+	return am.subnetworkNewAddressCounts[*subnetworkID]
+}
+
+func (am *AddressManager) incrementTriedAddressCount(subnetworkID *subnetworkid.SubnetworkID) {
+	if subnetworkID == nil {
+		am.fullNodeTriedAddressCount++
+		return
+	}
+	am.subnetworkTriedAddressCounts[*subnetworkID]++
+}
+
+// AddLocalAddress adds netAddress to the list of known local addresses to advertise
+// with the given priority.
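A hypothetical usage sketch for the local-address API introduced below (AddLocalAddress / GetBestLocalAddress); the IPs are documentation addresses, not real peers:

```go
package main

import (
	"fmt"
	"net"

	"github.com/kaspanet/kaspad/addressmanager"
	"github.com/kaspanet/kaspad/wire"
)

// advertiseExample assumes an already-constructed address manager.
func advertiseExample(addressManager *addressmanager.AddressManager) {
	// Advertise a local address; ManualPrio outranks addresses that were
	// merely discovered on an interface.
	localAddress := wire.NewNetAddressIPPort(net.ParseIP("198.51.100.7"), 16111, wire.SFNodeNetwork)
	if err := addressManager.AddLocalAddress(localAddress, addressmanager.ManualPrio); err != nil {
		fmt.Println("not routable:", err)
		return
	}

	// Ask which local address to put in a version message for this peer.
	remoteAddress := wire.NewNetAddressIPPort(net.ParseIP("203.0.113.5"), 16111, wire.SFNodeNetwork)
	bestAddress := addressManager.GetBestLocalAddress(remoteAddress)
	fmt.Println("advertise:", addressmanager.NetAddressKey(bestAddress))
}
```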
+func (am *AddressManager) AddLocalAddress(netAddress *wire.NetAddress, priority AddressPriority) error { + if !am.IsRoutable(netAddress) { + return errors.Errorf("address %s is not routable", netAddress.IP) + } + + am.localAddressesLock.Lock() + defer am.localAddressesLock.Unlock() + + addressKey := NetAddressKey(netAddress) + address, ok := am.localAddresses[addressKey] + if !ok || address.score < priority { + if ok { + address.score = priority + 1 + } else { + am.localAddresses[addressKey] = &localAddress{ + netAddress: netAddress, + score: priority, + } + } + } + return nil +} + +// getReachabilityFrom returns the relative reachability of the provided local +// address to the provided remote address. +func (am *AddressManager) getReachabilityFrom(localAddress, remoteAddress *wire.NetAddress) int { + const ( + Unreachable = 0 + Default = iota + Teredo + Ipv6Weak + Ipv4 + Ipv6Strong + Private + ) + + if !am.IsRoutable(remoteAddress) { + return Unreachable + } + + if IsRFC4380(remoteAddress) { + if !am.IsRoutable(localAddress) { + return Default + } + + if IsRFC4380(localAddress) { + return Teredo + } + + if IsIPv4(localAddress) { + return Ipv4 + } + + return Ipv6Weak + } + + if IsIPv4(remoteAddress) { + if am.IsRoutable(localAddress) && IsIPv4(localAddress) { + return Ipv4 + } + return Unreachable + } + + /* ipv6 */ + var tunnelled bool + // Is our v6 is tunnelled? + if IsRFC3964(localAddress) || IsRFC6052(localAddress) || IsRFC6145(localAddress) { + tunnelled = true + } + + if !am.IsRoutable(localAddress) { + return Default + } + + if IsRFC4380(localAddress) { + return Teredo + } + + if IsIPv4(localAddress) { + return Ipv4 + } + + if tunnelled { + // only prioritise ipv6 if we aren't tunnelling it. + return Ipv6Weak + } + + return Ipv6Strong +} + +// GetBestLocalAddress returns the most appropriate local address to use +// for the given remote address. +func (am *AddressManager) GetBestLocalAddress(remoteAddress *wire.NetAddress) *wire.NetAddress { + am.localAddressesLock.Lock() + defer am.localAddressesLock.Unlock() + + bestReach := 0 + var bestScore AddressPriority + var bestAddress *wire.NetAddress + for _, localAddress := range am.localAddresses { + reach := am.getReachabilityFrom(localAddress.netAddress, remoteAddress) + if reach > bestReach || + (reach == bestReach && localAddress.score > bestScore) { + bestReach = reach + bestScore = localAddress.score + bestAddress = localAddress.netAddress + } + } + if bestAddress != nil { + log.Debugf("Suggesting address %s:%d for %s:%d", bestAddress.IP, + bestAddress.Port, remoteAddress.IP, remoteAddress.Port) + } else { + log.Debugf("No worthy address for %s:%d", remoteAddress.IP, + remoteAddress.Port) + + // Send something unroutable if nothing suitable. + var ip net.IP + if !IsIPv4(remoteAddress) { + ip = net.IPv6zero + } else { + ip = net.IPv4zero + } + services := wire.SFNodeNetwork | wire.SFNodeBloom + bestAddress = wire.NewNetAddressIPPort(ip, 0, services) + } + + return bestAddress +} diff --git a/addrmgr/addrmanager_test.go b/addressmanager/addressmanager_test.go similarity index 94% rename from addrmgr/addrmanager_test.go rename to addressmanager/addressmanager_test.go index ca319fb51..90a83787d 100644 --- a/addrmgr/addrmanager_test.go +++ b/addressmanager/addressmanager_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package addrmgr +package addressmanager import ( "fmt" @@ -26,7 +26,7 @@ import ( // method. 
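Before the tests: the reachability ladder in getReachabilityFrom above is just an ordered enum compared by GetBestLocalAddress. Distilled, with the RFC classifiers dropped:

```go
package main

import "fmt"

// Reachability classes distilled from getReachabilityFrom, in ascending
// preference (the diff builds the same ladder with iota).
const (
	unreachable = iota
	defaultReach
	teredo
	ipv6Weak
	ipv4
	ipv6Strong
)

type candidate struct {
	name  string
	reach int
	score int // AddressPriority stand-in
}

// bestLocal mirrors GetBestLocalAddress's comparison: higher reachability
// wins, and the priority score breaks ties.
func bestLocal(candidates []candidate) string {
	bestReach, bestScore, bestName := 0, -1, ""
	for _, c := range candidates {
		if c.reach > bestReach || (c.reach == bestReach && c.score > bestScore) {
			bestReach, bestScore, bestName = c.reach, c.score, c.name
		}
	}
	return bestName
}

func main() {
	fmt.Println(bestLocal([]candidate{
		{"teredo-addr", teredo, 4},
		{"v4-addr", ipv4, 0},
		{"v6-addr", ipv6Strong, 0},
	})) // v6-addr: reachability outranks the score
}
```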
type naTest struct { in wire.NetAddress - want string + want AddressKey } // naTests houses all of the tests to be performed against the NetAddressKey @@ -97,7 +97,7 @@ func addNaTests() { addNaTest("fef3::4:4", 8336, "[fef3::4:4]:8336") } -func addNaTest(ip string, port uint16, want string) { +func addNaTest(ip string, port uint16, want AddressKey) { nip := net.ParseIP(ip) na := *wire.NewNetAddressIPPort(nip, port, wire.SFNodeNetwork) test := naTest{na, want} @@ -109,7 +109,7 @@ func lookupFuncForTest(host string) ([]net.IP, error) { } func newAddrManagerForTest(t *testing.T, testName string, - localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddrManager, teardown func()) { + localSubnetworkID *subnetworkid.SubnetworkID) (addressManager *AddressManager, teardown func()) { cfg := config.DefaultConfig() cfg.SubnetworkID = localSubnetworkID @@ -302,7 +302,7 @@ func TestNeedMoreAddresses(t *testing.T) { var err error for i := 0; i < addrsToAdd; i++ { - s := fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60) + s := AddressKey(fmt.Sprintf("%d.%d.173.147:8333", i/128+60, i%128+60)) addrs[i], err = amgr.DeserializeNetAddress(s) if err != nil { t.Errorf("Failed to turn %s into an address: %v", s, err) @@ -333,7 +333,7 @@ func TestGood(t *testing.T) { var err error for i := 0; i < addrsToAdd; i++ { - s := fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60) + s := AddressKey(fmt.Sprintf("%d.173.147.%d:8333", i/64+60, i%64+60)) addrs[i], err = amgr.DeserializeNetAddress(s) if err != nil { t.Errorf("Failed to turn %s into an address: %v", s, err) @@ -382,8 +382,8 @@ func TestGoodChangeSubnetworkID(t *testing.T) { amgr.AddAddress(addr, srcAddr, oldSubnetwork) amgr.Good(addr, oldSubnetwork) - // make sure address was saved to addrIndex under oldSubnetwork - ka := amgr.find(addr) + // make sure address was saved to addressIndex under oldSubnetwork + ka := amgr.knownAddress(addr) if ka == nil { t.Fatalf("Address was not found after first time .Good called") } @@ -392,10 +392,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) { } // make sure address was added to correct bucket under oldSubnetwork - bucket := amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)] + bucket := amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)] wasFound := false - for e := bucket.Front(); e != nil; e = e.Next() { - if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { + for _, ka := range bucket { + if NetAddressKey(ka.NetAddress()) == addrKey { wasFound = true } } @@ -407,8 +407,8 @@ func TestGoodChangeSubnetworkID(t *testing.T) { newSubnetwork := subnetworkid.SubnetworkIDRegistry amgr.Good(addr, newSubnetwork) - // make sure address was updated in addrIndex under newSubnetwork - ka = amgr.find(addr) + // make sure address was updated in addressIndex under newSubnetwork + ka = amgr.knownAddress(addr) if ka == nil { t.Fatalf("Address was not found after second time .Good called") } @@ -417,10 +417,10 @@ func TestGoodChangeSubnetworkID(t *testing.T) { } // make sure address was removed from bucket under oldSubnetwork - bucket = amgr.addrTried[*oldSubnetwork][amgr.getTriedBucket(addr)] + bucket = amgr.subnetworkTriedAddresBucketArrays[*oldSubnetwork][amgr.triedAddressBucketIndex(addr)] wasFound = false - for e := bucket.Front(); e != nil; e = e.Next() { - if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { + for _, ka := range bucket { + if NetAddressKey(ka.NetAddress()) == addrKey { wasFound = true } } @@ -429,10 +429,10 @@ func 
TestGoodChangeSubnetworkID(t *testing.T) { } // make sure address was added to correct bucket under newSubnetwork - bucket = amgr.addrTried[*newSubnetwork][amgr.getTriedBucket(addr)] + bucket = amgr.subnetworkTriedAddresBucketArrays[*newSubnetwork][amgr.triedAddressBucketIndex(addr)] wasFound = false - for e := bucket.Front(); e != nil; e = e.Next() { - if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { + for _, ka := range bucket { + if NetAddressKey(ka.NetAddress()) == addrKey { wasFound = true } } diff --git a/addrmgr/doc.go b/addressmanager/doc.go similarity index 94% rename from addrmgr/doc.go rename to addressmanager/doc.go index adda20aac..e6663fe78 100644 --- a/addrmgr/doc.go +++ b/addressmanager/doc.go @@ -1,5 +1,5 @@ /* -Package addrmgr implements concurrency safe Kaspa address manager. +Package addressmanager implements concurrency safe Kaspa address manager. Address Manager Overview @@ -31,4 +31,4 @@ peers which no longer appear to be good peers as well as bias the selection toward known good peers. The general idea is to make a best effort at only providing usable addresses. */ -package addrmgr +package addressmanager diff --git a/addrmgr/internal_test.go b/addressmanager/internal_test.go similarity index 75% rename from addrmgr/internal_test.go rename to addressmanager/internal_test.go index bc33a96fa..b89143a2c 100644 --- a/addrmgr/internal_test.go +++ b/addressmanager/internal_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package addrmgr +package addressmanager import ( "github.com/kaspanet/kaspad/util/mstime" @@ -19,6 +19,6 @@ func TstKnownAddressChance(ka *KnownAddress) float64 { func TstNewKnownAddress(na *wire.NetAddress, attempts int, lastattempt, lastsuccess mstime.Time, tried bool, refs int) *KnownAddress { - return &KnownAddress{na: na, attempts: attempts, lastattempt: lastattempt, - lastsuccess: lastsuccess, tried: tried, refs: refs} + return &KnownAddress{netAddress: na, attempts: attempts, lastAttempt: lastattempt, + lastSuccess: lastsuccess, tried: tried, referenceCount: refs} } diff --git a/addrmgr/knownaddress.go b/addressmanager/knownaddress.go similarity index 74% rename from addrmgr/knownaddress.go rename to addressmanager/knownaddress.go index dcdf4a87b..24f8cf095 100644 --- a/addrmgr/knownaddress.go +++ b/addressmanager/knownaddress.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package addrmgr +package addressmanager import ( "github.com/kaspanet/kaspad/util/mstime" @@ -16,20 +16,20 @@ import ( // KnownAddress tracks information about a known network address that is used // to determine how viable an address is. type KnownAddress struct { - na *wire.NetAddress - srcAddr *wire.NetAddress - attempts int - lastattempt mstime.Time - lastsuccess mstime.Time - tried bool - refs int // reference count of new buckets - subnetworkID *subnetworkid.SubnetworkID + netAddress *wire.NetAddress + sourceAddress *wire.NetAddress + attempts int + lastAttempt mstime.Time + lastSuccess mstime.Time + tried bool + referenceCount int // reference count of new buckets + subnetworkID *subnetworkid.SubnetworkID } // NetAddress returns the underlying wire.NetAddress associated with the // known address. func (ka *KnownAddress) NetAddress() *wire.NetAddress { - return ka.na + return ka.netAddress } // SubnetworkID returns the subnetwork ID of the known address. 
@@ -39,7 +39,7 @@ func (ka *KnownAddress) SubnetworkID() *subnetworkid.SubnetworkID { // LastAttempt returns the last time the known address was attempted. func (ka *KnownAddress) LastAttempt() mstime.Time { - return ka.lastattempt + return ka.lastAttempt } // chance returns the selection probability for a known address. The priority @@ -47,7 +47,7 @@ func (ka *KnownAddress) LastAttempt() mstime.Time { // attempted and how often attempts to connect to it have failed. func (ka *KnownAddress) chance() float64 { now := mstime.Now() - lastAttempt := now.Sub(ka.lastattempt) + lastAttempt := now.Sub(ka.lastAttempt) if lastAttempt < 0 { lastAttempt = 0 @@ -77,27 +77,27 @@ func (ka *KnownAddress) chance() float64 { // All addresses that meet these criteria are assumed to be worthless and not // worth keeping hold of. func (ka *KnownAddress) isBad() bool { - if ka.lastattempt.After(mstime.Now().Add(-1 * time.Minute)) { + if ka.lastAttempt.After(mstime.Now().Add(-1 * time.Minute)) { return false } // From the future? - if ka.na.Timestamp.After(mstime.Now().Add(10 * time.Minute)) { + if ka.netAddress.Timestamp.After(mstime.Now().Add(10 * time.Minute)) { return true } // Over a month old? - if ka.na.Timestamp.Before(mstime.Now().Add(-1 * numMissingDays * time.Hour * 24)) { + if ka.netAddress.Timestamp.Before(mstime.Now().Add(-1 * numMissingDays * time.Hour * 24)) { return true } // Never succeeded? - if ka.lastsuccess.IsZero() && ka.attempts >= numRetries { + if ka.lastSuccess.IsZero() && ka.attempts >= numRetries { return true } // Hasn't succeeded in too long? - if !ka.lastsuccess.After(mstime.Now().Add(-1*minBadDays*time.Hour*24)) && + if !ka.lastSuccess.After(mstime.Now().Add(-1*minBadDays*time.Hour*24)) && ka.attempts >= maxFailures { return true } diff --git a/addrmgr/knownaddress_test.go b/addressmanager/knownaddress_test.go similarity index 58% rename from addrmgr/knownaddress_test.go rename to addressmanager/knownaddress_test.go index 62ab5b040..65bca6e26 100644 --- a/addrmgr/knownaddress_test.go +++ b/addressmanager/knownaddress_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
-package addrmgr_test +package addressmanager_test import ( "github.com/kaspanet/kaspad/util/mstime" @@ -10,39 +10,39 @@ import ( "testing" "time" - "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/addressmanager" "github.com/kaspanet/kaspad/wire" ) func TestChance(t *testing.T) { now := mstime.Now() var tests = []struct { - addr *addrmgr.KnownAddress + addr *addressmanager.KnownAddress expected float64 }{ { //Test normal case - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, + addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, 0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0), 1.0, }, { //Test case in which lastseen < 0 - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)}, + addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(20 * time.Second)}, 0, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0), 1.0, }, { - //Test case in which lastattempt < 0 - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, + //Test case in which lastAttempt < 0 + addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, 0, mstime.Now().Add(30*time.Minute), mstime.Now(), false, 0), 1.0 * .01, }, { - //Test case in which lastattempt < ten minutes - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, + //Test case in which lastAttempt < ten minutes + addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, 0, mstime.Now().Add(-5*time.Minute), mstime.Now(), false, 0), 1.0 * .01, }, { //Test case with several failed attempts. - addrmgr.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, + addressmanager.TstNewKnownAddress(&wire.NetAddress{Timestamp: now.Add(-35 * time.Second)}, 2, mstime.Now().Add(-30*time.Minute), mstime.Now(), false, 0), 1 / 1.5 / 1.5, }, @@ -50,7 +50,7 @@ func TestChance(t *testing.T) { err := .0001 for i, test := range tests { - chance := addrmgr.TstKnownAddressChance(test.addr) + chance := addressmanager.TstKnownAddressChance(test.addr) if math.Abs(test.expected-chance) >= err { t.Errorf("case %d: got %f, expected %f", i, chance, test.expected) } @@ -72,44 +72,44 @@ func TestIsBad(t *testing.T) { currentNa := &wire.NetAddress{Timestamp: secondsOld} //Test addresses that have been tried in the last minute. 
- if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) { + if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 3, secondsOld, zeroTime, false, 0)) { t.Errorf("test case 1: addresses that have been tried in the last minute are not bad.") } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) { + if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 3, secondsOld, zeroTime, false, 0)) { t.Errorf("test case 2: addresses that have been tried in the last minute are not bad.") } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) { + if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, zeroTime, false, 0)) { t.Errorf("test case 3: addresses that have been tried in the last minute are not bad.") } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) { + if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 3, secondsOld, monthOld, true, 0)) { t.Errorf("test case 4: addresses that have been tried in the last minute are not bad.") } - if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) { + if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(currentNa, 2, secondsOld, secondsOld, true, 0)) { t.Errorf("test case 5: addresses that have been tried in the last minute are not bad.") } //Test address that claims to be from the future. - if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) { + if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(futureNa, 0, minutesOld, hoursOld, true, 0)) { t.Errorf("test case 6: addresses that claim to be from the future are bad.") } //Test address that has not been seen in over a month. - if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) { + if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(monthOldNa, 0, minutesOld, hoursOld, true, 0)) { t.Errorf("test case 7: addresses more than a month old are bad.") } //It has failed at least three times and never succeeded. - if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) { + if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 3, minutesOld, zeroTime, true, 0)) { t.Errorf("test case 8: addresses that have never succeeded are bad.") } //It has failed ten times in the last week - if !addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) { + if !addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 10, minutesOld, monthOld, true, 0)) { t.Errorf("test case 9: addresses that have not succeeded in too long are bad.") } //Test an address that should work. 
- if addrmgr.TstKnownAddressIsBad(addrmgr.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) { + if addressmanager.TstKnownAddressIsBad(addressmanager.TstNewKnownAddress(minutesOldNa, 2, minutesOld, hoursOld, true, 0)) { t.Errorf("test case 10: This should be a valid address.") } } diff --git a/addrmgr/log.go b/addressmanager/log.go similarity index 93% rename from addrmgr/log.go rename to addressmanager/log.go index f25419bef..b7c926b19 100644 --- a/addrmgr/log.go +++ b/addressmanager/log.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package addrmgr +package addressmanager import ( "github.com/kaspanet/kaspad/logger" diff --git a/addrmgr/network.go b/addressmanager/network.go similarity index 97% rename from addrmgr/network.go rename to addressmanager/network.go index 3d1a833bf..5766e12c2 100644 --- a/addrmgr/network.go +++ b/addressmanager/network.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package addrmgr +package addressmanager import ( "net" @@ -200,8 +200,8 @@ func IsValid(na *wire.NetAddress) bool { // IsRoutable returns whether or not the passed address is routable over // the public internet. This is true as long as the address is valid and is not // in any reserved ranges. -func (a *AddrManager) IsRoutable(na *wire.NetAddress) bool { - if a.cfg.NetParams().AcceptUnroutable { +func (am *AddressManager) IsRoutable(na *wire.NetAddress) bool { + if am.cfg.NetParams().AcceptUnroutable { return !IsLocal(na) } @@ -215,11 +215,11 @@ func (a *AddrManager) IsRoutable(na *wire.NetAddress) bool { // of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string // "local" for a local address, and the string "unroutable" for an unroutable // address. -func (a *AddrManager) GroupKey(na *wire.NetAddress) string { +func (am *AddressManager) GroupKey(na *wire.NetAddress) string { if IsLocal(na) { return "local" } - if !a.IsRoutable(na) { + if !am.IsRoutable(na) { return "unroutable" } if IsIPv4(na) { diff --git a/addrmgr/network_test.go b/addressmanager/network_test.go similarity index 99% rename from addrmgr/network_test.go rename to addressmanager/network_test.go index cc984001b..40cd92306 100644 --- a/addrmgr/network_test.go +++ b/addressmanager/network_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package addrmgr +package addressmanager import ( "net" diff --git a/addrmgr/addrmanager.go b/addrmgr/addrmanager.go deleted file mode 100644 index a7d00e6e7..000000000 --- a/addrmgr/addrmanager.go +++ /dev/null @@ -1,1374 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
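For context on the GroupKey hunk above: routable peers are grouped by network range (the /16 for IPv4, a /32 for most IPv6, per the comment in network.go) so that a single range cannot dominate the buckets. A simplified sketch of that grouping, omitting the local, unroutable, tunnelled and he.net special cases the real code handles:

package addrsketch

import "net"

// groupKeySketch reduces an IP to its group key: the /16 prefix for IPv4
// and a /32 prefix for IPv6. The real GroupKey returns "local" and
// "unroutable" for the corresponding address classes first.
func groupKeySketch(ip net.IP) string {
	if ip4 := ip.To4(); ip4 != nil {
		return ip4.Mask(net.CIDRMask(16, 32)).String()
	}
	return ip.Mask(net.CIDRMask(32, 128)).String()
}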
- -package addrmgr - -import ( - "bytes" - "container/list" - crand "crypto/rand" // for seeding - "encoding/binary" - "encoding/gob" - "io" - "math/rand" - "net" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/kaspanet/kaspad/config" - "github.com/kaspanet/kaspad/dbaccess" - "github.com/kaspanet/kaspad/util/mstime" - "github.com/pkg/errors" - - "github.com/kaspanet/kaspad/util/subnetworkid" - - "github.com/kaspanet/kaspad/util/daghash" - "github.com/kaspanet/kaspad/wire" -) - -type newBucket [NewBucketCount]map[string]*KnownAddress -type triedBucket [TriedBucketCount]*list.List - -// AddrManager provides a concurrency safe address manager for caching potential -// peers on the Kaspa network. -type AddrManager struct { - cfg *config.Config - databaseContext *dbaccess.DatabaseContext - mtx sync.Mutex - lookupFunc func(string) ([]net.IP, error) - rand *rand.Rand - key [32]byte - addrIndex map[string]*KnownAddress // address key to ka for all addrs. - addrNew map[subnetworkid.SubnetworkID]*newBucket - addrNewFullNodes newBucket - addrTried map[subnetworkid.SubnetworkID]*triedBucket - addrTriedFullNodes triedBucket - started int32 - shutdown int32 - wg sync.WaitGroup - quit chan struct{} - nTried map[subnetworkid.SubnetworkID]int - nNew map[subnetworkid.SubnetworkID]int - nTriedFullNodes int - nNewFullNodes int - lamtx sync.Mutex - localAddresses map[string]*localAddress - localSubnetworkID *subnetworkid.SubnetworkID -} - -// New returns a new Kaspa address manager. -func New(cfg *config.Config, databaseContext *dbaccess.DatabaseContext) *AddrManager { - addressManager := AddrManager{ - cfg: cfg, - databaseContext: databaseContext, - lookupFunc: cfg.Lookup, - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - quit: make(chan struct{}), - localAddresses: make(map[string]*localAddress), - localSubnetworkID: cfg.SubnetworkID, - } - addressManager.reset() - return &addressManager -} - -type serializedKnownAddress struct { - Addr string - Src string - SubnetworkID string - Attempts int - TimeStamp int64 - LastAttempt int64 - LastSuccess int64 - // no refcount or tried, that is available from context. -} - -type serializedNewBucket [NewBucketCount][]string -type serializedTriedBucket [TriedBucketCount][]string - -// PeersStateForSerialization is the data model that is used to -// serialize the peers state to any encoding. -type PeersStateForSerialization struct { - Version int - Key [32]byte - Addresses []*serializedKnownAddress - NewBuckets map[string]*serializedNewBucket // string is Subnetwork ID - NewBucketFullNodes serializedNewBucket - TriedBuckets map[string]*serializedTriedBucket // string is Subnetwork ID - TriedBucketFullNodes serializedTriedBucket -} - -type localAddress struct { - na *wire.NetAddress - score AddressPriority -} - -// AddressPriority type is used to describe the hierarchy of local address -// discovery methods. -type AddressPriority int - -const ( - // InterfacePrio signifies the address is on a local interface - InterfacePrio AddressPriority = iota - - // BoundPrio signifies the address has been explicitly bounded to. - BoundPrio - - // UpnpPrio signifies the address was obtained from UPnP. - UpnpPrio - - // HTTPPrio signifies the address was obtained from an external HTTP service. - HTTPPrio - - // ManualPrio signifies the address was provided by --externalip. - ManualPrio -) - -const ( - // needAddressThreshold is the number of addresses under which the - // address manager will claim to need more addresses. 
- needAddressThreshold = 1000 - - // dumpAddressInterval is the interval used to dump the address - // cache to disk for future use. - dumpAddressInterval = time.Minute * 10 - - // triedBucketSize is the maximum number of addresses in each - // tried address bucket. - triedBucketSize = 256 - - // TriedBucketCount is the number of buckets we split tried - // addresses over. - TriedBucketCount = 64 - - // newBucketSize is the maximum number of addresses in each new address - // bucket. - newBucketSize = 64 - - // NewBucketCount is the number of buckets that we spread new addresses - // over. - NewBucketCount = 1024 - - // triedBucketsPerGroup is the number of tried buckets over which an - // address group will be spread. - triedBucketsPerGroup = 8 - - // newBucketsPerGroup is the number of new buckets over which an - // source address group will be spread. - newBucketsPerGroup = 64 - - // newBucketsPerAddress is the number of buckets a frequently seen new - // address may end up in. - newBucketsPerAddress = 8 - - // numMissingDays is the number of days before which we assume an - // address has vanished if we have not seen it announced in that long. - numMissingDays = 30 - - // numRetries is the number of tried without a single success before - // we assume an address is bad. - numRetries = 3 - - // maxFailures is the maximum number of failures we will accept without - // a success before considering an address bad. - maxFailures = 10 - - // minBadDays is the number of days since the last success before we - // will consider evicting an address. - minBadDays = 7 - - // getAddrMin is the least addresses that we will send in response - // to a getAddr. If we have less than this amount, we send everything. - getAddrMin = 50 - - // GetAddressesMax is the most addresses that we will send in response - // to a getAddr (in practise the most addresses we will return from a - // call to AddressCache()). - GetAddressesMax = 2500 - - // getAddrPercent is the percentage of total addresses known that we - // will share with a call to AddressCache. - getAddrPercent = 23 - - // serializationVersion is the current version of the on-disk format. - serializationVersion = 1 -) - -// updateAddress is a helper function to either update an address already known -// to the address manager, or to add the address if not already known. -func (a *AddrManager) updateAddress(netAddr, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { - // Filter out non-routable addresses. Note that non-routable - // also includes invalid and local addresses. - if !a.IsRoutable(netAddr) { - return - } - - addr := NetAddressKey(netAddr) - ka := a.find(netAddr) - if ka != nil { - // TODO: only update addresses periodically. - // Update the last seen time and services. - // note that to prevent causing excess garbage on getaddr - // messages the netaddresses in addrmaanger are *immutable*, - // if we need to change them then we replace the pointer with a - // new copy so that we don't have to copy every na for getaddr. - if netAddr.Timestamp.After(ka.na.Timestamp) || - (ka.na.Services&netAddr.Services) != - netAddr.Services { - - naCopy := *ka.na - naCopy.Timestamp = netAddr.Timestamp - naCopy.AddService(netAddr.Services) - ka.na = &naCopy - } - - // If already in tried, we have nothing to do here. - if ka.tried { - return - } - - // Already at our max? - if ka.refs == newBucketsPerAddress { - return - } - - // The more entries we have, the less likely we are to add more. - // likelihood is 2N. 
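The "likelihood is 2N" comment above means an address already referenced by N new buckets is re-inserted with probability 1/(2N), which keeps frequently announced addresses from flooding the new buckets. A sketch of that gate, matching the rand.Int31n(factor) test in the code that follows:

package addrsketch

import "math/rand"

// shouldAddAgainSketch returns true with probability 1/(2*refs): the more
// new buckets already reference an address, the less likely another
// insertion becomes. The first insertion is unconditional.
func shouldAddAgainSketch(refs int32, rng *rand.Rand) bool {
	if refs == 0 {
		return true
	}
	return rng.Int31n(2*refs) == 0
}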
- factor := int32(2 * ka.refs) - if a.rand.Int31n(factor) != 0 { - return - } - } else { - // Make a copy of the net address to avoid races since it is - // updated elsewhere in the addrmanager code and would otherwise - // change the actual netaddress on the peer. - netAddrCopy := *netAddr - ka = &KnownAddress{na: &netAddrCopy, srcAddr: srcAddr, subnetworkID: subnetworkID} - a.addrIndex[addr] = ka - if subnetworkID == nil { - a.nNewFullNodes++ - } else { - a.nNew[*subnetworkID]++ - } - // XXX time penalty? - } - - bucket := a.getNewBucket(netAddr, srcAddr) - - // Already exists? - if ka.subnetworkID == nil { - if _, ok := a.addrNewFullNodes[bucket][addr]; ok { - return - } - } else if a.addrNew[*ka.subnetworkID] != nil { - if _, ok := a.addrNew[*ka.subnetworkID][bucket][addr]; ok { - return - } - } - - // Enforce max addresses. - if ka.subnetworkID == nil { - if len(a.addrNewFullNodes[bucket]) > newBucketSize { - log.Tracef("new bucket of full nodes is full, expiring old") - a.expireNewFullNodes(bucket) - } - } else if a.addrNew[*ka.subnetworkID] != nil && len(a.addrNew[*ka.subnetworkID][bucket]) > newBucketSize { - log.Tracef("new bucket is full, expiring old") - a.expireNewBySubnetworkID(ka.subnetworkID, bucket) - } - - // Add to new bucket. - ka.refs++ - a.updateAddrNew(bucket, addr, ka) - - if ka.subnetworkID == nil { - log.Tracef("Added new full node address %s for a total of %d addresses", addr, - a.nTriedFullNodes+a.nNewFullNodes) - } else { - log.Tracef("Added new address %s for a total of %d addresses", addr, - a.nTried[*ka.subnetworkID]+a.nNew[*ka.subnetworkID]) - } -} - -func (a *AddrManager) updateAddrNew(bucket int, addr string, ka *KnownAddress) { - if ka.subnetworkID == nil { - a.addrNewFullNodes[bucket][addr] = ka - return - } - - if _, ok := a.addrNew[*ka.subnetworkID]; !ok { - a.addrNew[*ka.subnetworkID] = &newBucket{} - for i := range a.addrNew[*ka.subnetworkID] { - a.addrNew[*ka.subnetworkID][i] = make(map[string]*KnownAddress) - } - } - a.addrNew[*ka.subnetworkID][bucket][addr] = ka -} - -func (a *AddrManager) updateAddrTried(bucket int, ka *KnownAddress) { - if ka.subnetworkID == nil { - a.addrTriedFullNodes[bucket].PushBack(ka) - return - } - - if _, ok := a.addrTried[*ka.subnetworkID]; !ok { - a.addrTried[*ka.subnetworkID] = &triedBucket{} - for i := range a.addrTried[*ka.subnetworkID] { - a.addrTried[*ka.subnetworkID][i] = list.New() - } - } - a.addrTried[*ka.subnetworkID][bucket].PushBack(ka) -} - -// expireNew makes space in the new buckets by expiring the really bad entries. -// If no bad entries are available we look at a few and remove the oldest. -func (a *AddrManager) expireNew(bucket *newBucket, idx int, decrNewCounter func()) { - // First see if there are any entries that are so bad we can just throw - // them away. otherwise we throw away the oldest entry in the cache. - // We keep track of oldest in the initial traversal and use that - // information instead. 
- var oldest *KnownAddress - for k, v := range bucket[idx] { - if v.isBad() { - log.Tracef("expiring bad address %s", k) - delete(bucket[idx], k) - v.refs-- - if v.refs == 0 { - decrNewCounter() - delete(a.addrIndex, k) - } - continue - } - if oldest == nil { - oldest = v - } else if !v.na.Timestamp.After(oldest.na.Timestamp) { - oldest = v - } - } - - if oldest != nil { - key := NetAddressKey(oldest.na) - log.Tracef("expiring oldest address %s", key) - - delete(bucket[idx], key) - oldest.refs-- - if oldest.refs == 0 { - decrNewCounter() - delete(a.addrIndex, key) - } - } -} - -// expireNewBySubnetworkID makes space in the new buckets by expiring the really bad entries. -// If no bad entries are available we look at a few and remove the oldest. -func (a *AddrManager) expireNewBySubnetworkID(subnetworkID *subnetworkid.SubnetworkID, bucket int) { - a.expireNew(a.addrNew[*subnetworkID], bucket, func() { a.nNew[*subnetworkID]-- }) -} - -// expireNewFullNodes makes space in the new buckets by expiring the really bad entries. -// If no bad entries are available we look at a few and remove the oldest. -func (a *AddrManager) expireNewFullNodes(bucket int) { - a.expireNew(&a.addrNewFullNodes, bucket, func() { a.nNewFullNodes-- }) -} - -// pickTried selects an address from the tried bucket to be evicted. -// We just choose the eldest. -func (a *AddrManager) pickTried(subnetworkID *subnetworkid.SubnetworkID, bucket int) *list.Element { - var oldest *KnownAddress - var oldestElem *list.Element - var lst *list.List - if subnetworkID == nil { - lst = a.addrTriedFullNodes[bucket] - } else { - lst = a.addrTried[*subnetworkID][bucket] - } - for e := lst.Front(); e != nil; e = e.Next() { - ka := e.Value.(*KnownAddress) - if oldest == nil || oldest.na.Timestamp.After(ka.na.Timestamp) { - oldestElem = e - oldest = ka - } - - } - return oldestElem -} - -func (a *AddrManager) getNewBucket(netAddr, srcAddr *wire.NetAddress) int { - // doublesha256(key + sourcegroup + int64(doublesha256(key + group + sourcegroup))%bucket_per_source_group) % num_new_buckets - - data1 := []byte{} - data1 = append(data1, a.key[:]...) - data1 = append(data1, []byte(a.GroupKey(netAddr))...) - data1 = append(data1, []byte(a.GroupKey(srcAddr))...) - hash1 := daghash.DoubleHashB(data1) - hash64 := binary.LittleEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.LittleEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, a.key[:]...) - data2 = append(data2, a.GroupKey(srcAddr)...) - data2 = append(data2, hashbuf[:]...) - - hash2 := daghash.DoubleHashB(data2) - return int(binary.LittleEndian.Uint64(hash2) % NewBucketCount) -} - -func (a *AddrManager) getTriedBucket(netAddr *wire.NetAddress) int { - // doublesha256(key + group + truncate_to_64bits(doublesha256(key)) % buckets_per_group) % num_buckets - data1 := []byte{} - data1 = append(data1, a.key[:]...) - data1 = append(data1, []byte(NetAddressKey(netAddr))...) - hash1 := daghash.DoubleHashB(data1) - hash64 := binary.LittleEndian.Uint64(hash1) - hash64 %= triedBucketsPerGroup - var hashbuf [8]byte - binary.LittleEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, a.key[:]...) - data2 = append(data2, a.GroupKey(netAddr)...) - data2 = append(data2, hashbuf[:]...) - - hash2 := daghash.DoubleHashB(data2) - return int(binary.LittleEndian.Uint64(hash2) % TriedBucketCount) -} - -// addressHandler is the main handler for the address manager. It must be run -// as a goroutine. 
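For context on getNewBucket/getTriedBucket above: the keyed double-hash first pins a (group, source group) pair to one of a few per-source slots, then spreads those slots over the full bucket range, so addresses from one source stay confined to a small, key-dependent set of buckets. A condensed sketch, assuming daghash.DoubleHashB is a double SHA-256 as in btcd's chainhash:

package addrsketch

import (
	"crypto/sha256"
	"encoding/binary"
)

func doubleSHA256(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

// newBucketSketch mirrors the shape of getNewBucket:
// doublesha256(key + srcGroup + slot) % bucketCount, where
// slot = doublesha256(key + group + srcGroup) % bucketsPerGroup.
func newBucketSketch(key [32]byte, group, srcGroup string, bucketsPerGroup, bucketCount uint64) uint64 {
	inner := doubleSHA256(append(append(append([]byte{}, key[:]...), group...), srcGroup...))
	slot := binary.LittleEndian.Uint64(inner) % bucketsPerGroup

	var slotBytes [8]byte
	binary.LittleEndian.PutUint64(slotBytes[:], slot)
	outer := doubleSHA256(append(append(append([]byte{}, key[:]...), srcGroup...), slotBytes[:]...))
	return binary.LittleEndian.Uint64(outer) % bucketCount
}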
-func (a *AddrManager) addressHandler() { - dumpAddressTicker := time.NewTicker(dumpAddressInterval) - defer dumpAddressTicker.Stop() -out: - for { - select { - case <-dumpAddressTicker.C: - err := a.savePeers() - if err != nil { - panic(errors.Wrap(err, "error saving peers")) - } - - case <-a.quit: - break out - } - } - err := a.savePeers() - if err != nil { - panic(errors.Wrap(err, "error saving peers")) - } - a.wg.Done() - log.Trace("Address handler done") -} - -// savePeers saves all the known addresses to the database so they can be read back -// in at next run. -func (a *AddrManager) savePeers() error { - serializedPeersState, err := a.serializePeersState() - if err != nil { - return err - } - - return dbaccess.StorePeersState(a.databaseContext, serializedPeersState) -} - -func (a *AddrManager) serializePeersState() ([]byte, error) { - peersState, err := a.PeersStateForSerialization() - if err != nil { - return nil, err - } - - w := &bytes.Buffer{} - encoder := gob.NewEncoder(w) - err = encoder.Encode(&peersState) - if err != nil { - return nil, errors.Wrap(err, "failed to encode peers state") - } - - return w.Bytes(), nil -} - -// PeersStateForSerialization returns the data model that is used to serialize the peers state to any encoding. -func (a *AddrManager) PeersStateForSerialization() (*PeersStateForSerialization, error) { - a.mtx.Lock() - defer a.mtx.Unlock() - - // First we make a serializable data structure so we can encode it to - // gob. - peersState := new(PeersStateForSerialization) - peersState.Version = serializationVersion - copy(peersState.Key[:], a.key[:]) - - peersState.Addresses = make([]*serializedKnownAddress, len(a.addrIndex)) - i := 0 - for k, v := range a.addrIndex { - ska := new(serializedKnownAddress) - ska.Addr = k - if v.subnetworkID == nil { - ska.SubnetworkID = "" - } else { - ska.SubnetworkID = v.subnetworkID.String() - } - ska.TimeStamp = v.na.Timestamp.UnixMilliseconds() - ska.Src = NetAddressKey(v.srcAddr) - ska.Attempts = v.attempts - ska.LastAttempt = v.lastattempt.UnixMilliseconds() - ska.LastSuccess = v.lastsuccess.UnixMilliseconds() - // Tried and refs are implicit in the rest of the structure - // and will be worked out from context on unserialisation. 
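The save path above funnels the entire peers state through encoding/gob, and loadPeers later reverses the same encoding. A minimal round-trip sketch; the stand-in struct here is hypothetical, the real model being PeersStateForSerialization:

package addrsketch

import (
	"bytes"
	"encoding/gob"
)

// stateSketch stands in for PeersStateForSerialization; only the
// encode-to-buffer / decode-from-buffer pattern is the point here.
type stateSketch struct {
	Version int
	Key     [32]byte
}

func encodeState(s *stateSketch) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(s); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func decodeState(data []byte) (*stateSketch, error) {
	s := &stateSketch{}
	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(s); err != nil {
		return nil, err
	}
	return s, nil
}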
- peersState.Addresses[i] = ska - i++ - } - - peersState.NewBuckets = make(map[string]*serializedNewBucket) - for subnetworkID := range a.addrNew { - subnetworkIDStr := subnetworkID.String() - peersState.NewBuckets[subnetworkIDStr] = &serializedNewBucket{} - - for i := range a.addrNew[subnetworkID] { - peersState.NewBuckets[subnetworkIDStr][i] = make([]string, len(a.addrNew[subnetworkID][i])) - j := 0 - for k := range a.addrNew[subnetworkID][i] { - peersState.NewBuckets[subnetworkIDStr][i][j] = k - j++ - } - } - } - - for i := range a.addrNewFullNodes { - peersState.NewBucketFullNodes[i] = make([]string, len(a.addrNewFullNodes[i])) - j := 0 - for k := range a.addrNewFullNodes[i] { - peersState.NewBucketFullNodes[i][j] = k - j++ - } - } - - peersState.TriedBuckets = make(map[string]*serializedTriedBucket) - for subnetworkID := range a.addrTried { - subnetworkIDStr := subnetworkID.String() - peersState.TriedBuckets[subnetworkIDStr] = &serializedTriedBucket{} - - for i := range a.addrTried[subnetworkID] { - peersState.TriedBuckets[subnetworkIDStr][i] = make([]string, a.addrTried[subnetworkID][i].Len()) - j := 0 - for e := a.addrTried[subnetworkID][i].Front(); e != nil; e = e.Next() { - ka := e.Value.(*KnownAddress) - peersState.TriedBuckets[subnetworkIDStr][i][j] = NetAddressKey(ka.na) - j++ - } - } - } - - for i := range a.addrTriedFullNodes { - peersState.TriedBucketFullNodes[i] = make([]string, a.addrTriedFullNodes[i].Len()) - j := 0 - for e := a.addrTriedFullNodes[i].Front(); e != nil; e = e.Next() { - ka := e.Value.(*KnownAddress) - peersState.TriedBucketFullNodes[i][j] = NetAddressKey(ka.na) - j++ - } - } - - return peersState, nil -} - -// loadPeers loads the known address from the database. If missing, -// just don't load anything and start fresh. -func (a *AddrManager) loadPeers() error { - a.mtx.Lock() - defer a.mtx.Unlock() - - serializedPeerState, err := dbaccess.FetchPeersState(a.databaseContext) - if dbaccess.IsNotFoundError(err) { - a.reset() - log.Info("No peers state was found in the database. 
Created a new one", a.totalNumAddresses()) - return nil - } - if err != nil { - return err - } - - err = a.deserializePeersState(serializedPeerState) - if err != nil { - return err - } - - log.Infof("Loaded %d addresses from database", a.totalNumAddresses()) - return nil -} - -func (a *AddrManager) deserializePeersState(serializedPeerState []byte) error { - var peersState PeersStateForSerialization - r := bytes.NewBuffer(serializedPeerState) - dec := gob.NewDecoder(r) - err := dec.Decode(&peersState) - if err != nil { - return errors.Wrap(err, "error deserializing peers state") - } - - if peersState.Version != serializationVersion { - return errors.Errorf("unknown version %d in serialized "+ - "peers state", peersState.Version) - } - copy(a.key[:], peersState.Key[:]) - - for _, v := range peersState.Addresses { - ka := new(KnownAddress) - ka.na, err = a.DeserializeNetAddress(v.Addr) - if err != nil { - return errors.Errorf("failed to deserialize netaddress "+ - "%s: %s", v.Addr, err) - } - ka.srcAddr, err = a.DeserializeNetAddress(v.Src) - if err != nil { - return errors.Errorf("failed to deserialize netaddress "+ - "%s: %s", v.Src, err) - } - if v.SubnetworkID != "" { - ka.subnetworkID, err = subnetworkid.NewFromStr(v.SubnetworkID) - if err != nil { - return errors.Errorf("failed to deserialize subnetwork id "+ - "%s: %s", v.SubnetworkID, err) - } - } - ka.attempts = v.Attempts - ka.lastattempt = mstime.UnixMilliseconds(v.LastAttempt) - ka.lastsuccess = mstime.UnixMilliseconds(v.LastSuccess) - a.addrIndex[NetAddressKey(ka.na)] = ka - } - - for subnetworkIDStr := range peersState.NewBuckets { - subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) - if err != nil { - return err - } - for i, subnetworkNewBucket := range peersState.NewBuckets[subnetworkIDStr] { - for _, val := range subnetworkNewBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("newbucket contains %s but "+ - "none in address list", val) - } - - if ka.refs == 0 { - a.nNew[*subnetworkID]++ - } - ka.refs++ - a.updateAddrNew(i, val, ka) - } - } - } - - for i, newBucket := range peersState.NewBucketFullNodes { - for _, val := range newBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("full nodes newbucket contains %s but "+ - "none in address list", val) - } - - if ka.refs == 0 { - a.nNewFullNodes++ - } - ka.refs++ - a.updateAddrNew(i, val, ka) - } - } - - for subnetworkIDStr := range peersState.TriedBuckets { - subnetworkID, err := subnetworkid.NewFromStr(subnetworkIDStr) - if err != nil { - return err - } - for i, subnetworkTriedBucket := range peersState.TriedBuckets[subnetworkIDStr] { - for _, val := range subnetworkTriedBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("Tried bucket contains %s but "+ - "none in address list", val) - } - - ka.tried = true - a.nTried[*subnetworkID]++ - a.addrTried[*subnetworkID][i].PushBack(ka) - } - } - } - - for i, triedBucket := range peersState.TriedBucketFullNodes { - for _, val := range triedBucket { - ka, ok := a.addrIndex[val] - if !ok { - return errors.Errorf("Full nodes tried bucket contains %s but "+ - "none in address list", val) - } - - ka.tried = true - a.nTriedFullNodes++ - a.addrTriedFullNodes[i].PushBack(ka) - } - } - - // Sanity checking. 
- for k, v := range a.addrIndex { - if v.refs == 0 && !v.tried { - return errors.Errorf("address %s after serialisation "+ - "with no references", k) - } - - if v.refs > 0 && v.tried { - return errors.Errorf("address %s after serialisation "+ - "which is both new and tried!", k) - } - } - - return nil -} - -// DeserializeNetAddress converts a given address string to a *wire.NetAddress -func (a *AddrManager) DeserializeNetAddress(addr string) (*wire.NetAddress, error) { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return nil, err - } - - return a.HostToNetAddress(host, uint16(port), wire.SFNodeNetwork) -} - -// Start begins the core address handler which manages a pool of known -// addresses, timeouts, and interval based writes. -func (a *AddrManager) Start() error { - // Already started? - if atomic.AddInt32(&a.started, 1) != 1 { - return nil - } - - log.Trace("Starting address manager") - - // Load peers we already know about from the database. - err := a.loadPeers() - if err != nil { - return err - } - - // Start the address ticker to save addresses periodically. - a.wg.Add(1) - spawn("AddrManager.addressHandler", a.addressHandler) - return nil -} - -// Stop gracefully shuts down the address manager by stopping the main handler. -func (a *AddrManager) Stop() error { - if atomic.AddInt32(&a.shutdown, 1) != 1 { - log.Warnf("Address manager is already in the process of " + - "shutting down") - return nil - } - - log.Infof("Address manager shutting down") - close(a.quit) - a.wg.Wait() - return nil -} - -// AddAddresses adds new addresses to the address manager. It enforces a max -// number of addresses and silently ignores duplicate addresses. It is -// safe for concurrent access. -func (a *AddrManager) AddAddresses(addrs []*wire.NetAddress, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, na := range addrs { - a.updateAddress(na, srcAddr, subnetworkID) - } -} - -// AddAddress adds a new address to the address manager. It enforces a max -// number of addresses and silently ignores duplicate addresses. It is -// safe for concurrent access. -func (a *AddrManager) AddAddress(addr, srcAddr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.updateAddress(addr, srcAddr, subnetworkID) -} - -// AddAddressByIP adds an address where we are given an ip:port and not a -// wire.NetAddress. -func (a *AddrManager) AddAddressByIP(addrIP string, subnetworkID *subnetworkid.SubnetworkID) error { - // Split IP and port - addr, portStr, err := net.SplitHostPort(addrIP) - if err != nil { - return err - } - // Put it in wire.Netaddress - ip := net.ParseIP(addr) - if ip == nil { - return errors.Errorf("invalid ip address %s", addr) - } - port, err := strconv.ParseUint(portStr, 10, 0) - if err != nil { - return errors.Errorf("invalid port %s: %s", portStr, err) - } - na := wire.NewNetAddressIPPort(ip, uint16(port), 0) - a.AddAddress(na, na, subnetworkID) // XXX use correct src address - return nil -} - -// numAddresses returns the number of addresses that belongs to a specific subnetwork id -// which are known to the address manager. 
-func (a *AddrManager) numAddresses(subnetworkID *subnetworkid.SubnetworkID) int { - if subnetworkID == nil { - return a.nNewFullNodes + a.nTriedFullNodes - } - return a.nTried[*subnetworkID] + a.nNew[*subnetworkID] -} - -// totalNumAddresses returns the number of addresses known to the address manager. -func (a *AddrManager) totalNumAddresses() int { - total := a.nNewFullNodes + a.nTriedFullNodes - for _, numAddresses := range a.nTried { - total += numAddresses - } - for _, numAddresses := range a.nNew { - total += numAddresses - } - return total -} - -// TotalNumAddresses returns the number of addresses known to the address manager. -func (a *AddrManager) TotalNumAddresses() int { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.totalNumAddresses() -} - -// NeedMoreAddresses returns whether or not the address manager needs more -// addresses. -func (a *AddrManager) NeedMoreAddresses() bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - allAddrs := a.numAddresses(a.localSubnetworkID) - if a.localSubnetworkID != nil { - allAddrs += a.numAddresses(nil) - } - return allAddrs < needAddressThreshold -} - -// AddressCache returns the current address cache. It must be treated as -// read-only (but since it is a copy now, this is not as dangerous). -func (a *AddrManager) AddressCache(includeAllSubnetworks bool, subnetworkID *subnetworkid.SubnetworkID) []*wire.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - if len(a.addrIndex) == 0 { - return nil - } - - allAddr := []*wire.NetAddress{} - // Iteration order is undefined here, but we randomise it anyway. - for _, v := range a.addrIndex { - if includeAllSubnetworks || v.SubnetworkID().IsEqual(subnetworkID) { - allAddr = append(allAddr, v.na) - } - } - - numAddresses := len(allAddr) * getAddrPercent / 100 - if numAddresses > GetAddressesMax { - numAddresses = GetAddressesMax - } - if len(allAddr) < getAddrMin { - numAddresses = len(allAddr) - } - if len(allAddr) > getAddrMin && numAddresses < getAddrMin { - numAddresses = getAddrMin - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - j := rand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[0:numAddresses] -} - -// reset resets the address manager by reinitialising the random source -// and allocating fresh empty bucket storage. -func (a *AddrManager) reset() { - a.addrIndex = make(map[string]*KnownAddress) - - // fill key with bytes from a good random source. - io.ReadFull(crand.Reader, a.key[:]) - a.addrNew = make(map[subnetworkid.SubnetworkID]*newBucket) - a.addrTried = make(map[subnetworkid.SubnetworkID]*triedBucket) - - a.nNew = make(map[subnetworkid.SubnetworkID]int) - a.nTried = make(map[subnetworkid.SubnetworkID]int) - - for i := range a.addrNewFullNodes { - a.addrNewFullNodes[i] = make(map[string]*KnownAddress) - } - for i := range a.addrTriedFullNodes { - a.addrTriedFullNodes[i] = list.New() - } - a.nNewFullNodes = 0 - a.nTriedFullNodes = 0 -} - -// HostToNetAddress returns a netaddress given a host address. If -// the host is not an IP address it will be resolved. 
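AddressCache above shuffles only the first numAddresses positions, since the tail is sliced off anyway; a partial Fisher-Yates still gives every element an equal chance of landing in the returned prefix. A sketch of that sampling step:

package addrsketch

import "math/rand"

// samplePrefix shuffles just the first n positions of items (a partial
// Fisher-Yates) and returns them. Each swap picks uniformly from the
// not-yet-fixed suffix, so every element is equally likely to be included
// without paying for a full shuffle.
func samplePrefix(items []string, n int, rng *rand.Rand) []string {
	if n > len(items) {
		n = len(items)
	}
	for i := 0; i < n; i++ {
		j := rng.Intn(len(items)-i) + i
		items[i], items[j] = items[j], items[i]
	}
	return items[:n]
}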
-func (a *AddrManager) HostToNetAddress(host string, port uint16, services wire.ServiceFlag) (*wire.NetAddress, error) { - ip := net.ParseIP(host) - if ip == nil { - ips, err := a.lookupFunc(host) - if err != nil { - return nil, err - } - if len(ips) == 0 { - return nil, errors.Errorf("no addresses found for %s", host) - } - ip = ips[0] - } - - return wire.NewNetAddressIPPort(ip, port, services), nil -} - -// NetAddressKey returns a string key in the form of ip:port for IPv4 addresses -// or [ip]:port for IPv6 addresses. -func NetAddressKey(na *wire.NetAddress) string { - port := strconv.FormatUint(uint64(na.Port), 10) - - return net.JoinHostPort(na.IP.String(), port) -} - -// GetAddress returns a single address that should be routable. It picks a -// random one from the possible addresses with preference given to ones that -// have not been used recently and should not pick 'close' addresses -// consecutively. -func (a *AddrManager) GetAddress() *KnownAddress { - // Protect concurrent access. - a.mtx.Lock() - defer a.mtx.Unlock() - - var knownAddress *KnownAddress - if a.localSubnetworkID == nil { - knownAddress = a.getAddress(&a.addrTriedFullNodes, a.nTriedFullNodes, - &a.addrNewFullNodes, a.nNewFullNodes) - } else { - subnetworkID := *a.localSubnetworkID - knownAddress = a.getAddress(a.addrTried[subnetworkID], a.nTried[subnetworkID], - a.addrNew[subnetworkID], a.nNew[subnetworkID]) - } - - return knownAddress - -} - -// see GetAddress for details -func (a *AddrManager) getAddress(addrTried *triedBucket, nTried int, addrNew *newBucket, nNew int) *KnownAddress { - // Use a 50% chance for choosing between tried and new table entries. - if nTried > 0 && (nNew == 0 || a.rand.Intn(2) == 0) { - // Tried entry. - large := 1 << 30 - factor := 1.0 - for { - // pick a random bucket. - bucket := a.rand.Intn(len(addrTried)) - if addrTried[bucket].Len() == 0 { - continue - } - - // Pick a random entry in the list - e := addrTried[bucket].Front() - for i := - a.rand.Int63n(int64(addrTried[bucket].Len())); i > 0; i-- { - e = e.Next() - } - ka := e.Value.(*KnownAddress) - randval := a.rand.Intn(large) - if float64(randval) < (factor * ka.chance() * float64(large)) { - log.Tracef("Selected %s from tried bucket", - NetAddressKey(ka.na)) - return ka - } - factor *= 1.2 - } - } else if nNew > 0 { - // new node. - // XXX use a closure/function to avoid repeating this. - large := 1 << 30 - factor := 1.0 - for { - // Pick a random bucket. - bucket := a.rand.Intn(len(addrNew)) - if len(addrNew[bucket]) == 0 { - continue - } - // Then, a random entry in it. - var ka *KnownAddress - nth := a.rand.Intn(len(addrNew[bucket])) - for _, value := range addrNew[bucket] { - if nth == 0 { - ka = value - } - nth-- - } - randval := a.rand.Intn(large) - if float64(randval) < (factor * ka.chance() * float64(large)) { - log.Tracef("Selected %s from new bucket", - NetAddressKey(ka.na)) - return ka - } - factor *= 1.2 - } - } - return nil -} - -func (a *AddrManager) find(addr *wire.NetAddress) *KnownAddress { - return a.addrIndex[NetAddressKey(addr)] -} - -// Attempt increases the given address' attempt counter and updates -// the last attempt time. -func (a *AddrManager) Attempt(addr *wire.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - // find address. - // Surely address will be in tried by now? 
- ka := a.find(addr) - if ka == nil { - return - } - // set last tried time to now - ka.attempts++ - ka.lastattempt = mstime.Now() -} - -// Connected Marks the given address as currently connected and working at the -// current time. The address must already be known to AddrManager else it will -// be ignored. -func (a *AddrManager) Connected(addr *wire.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.find(addr) - if ka == nil { - return - } - - // Update the time as long as it has been 20 minutes since last we did - // so. - now := mstime.Now() - if now.After(ka.na.Timestamp.Add(time.Minute * 20)) { - // ka.na is immutable, so replace it. - naCopy := *ka.na - naCopy.Timestamp = mstime.Now() - ka.na = &naCopy - } -} - -// Good marks the given address as good. To be called after a successful -// connection and version exchange. If the address is unknown to the address -// manager it will be ignored. -func (a *AddrManager) Good(addr *wire.NetAddress, subnetworkID *subnetworkid.SubnetworkID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.find(addr) - if ka == nil { - return - } - oldSubnetworkID := ka.subnetworkID - - // ka.Timestamp is not updated here to avoid leaking information - // about currently connected peers. - now := mstime.Now() - ka.lastsuccess = now - ka.lastattempt = now - ka.attempts = 0 - ka.subnetworkID = subnetworkID - - addrKey := NetAddressKey(addr) - triedBucketIndex := a.getTriedBucket(ka.na) - - if ka.tried { - // If this address was already tried, and subnetworkID didn't change - don't do anything - if subnetworkID.IsEqual(oldSubnetworkID) { - return - } - - // If this address was already tried, but subnetworkID was changed - - // update subnetworkID, than continue as though this is a new address - bucketList := a.addrTried[*oldSubnetworkID][triedBucketIndex] - for e := bucketList.Front(); e != nil; e = e.Next() { - if NetAddressKey(e.Value.(*KnownAddress).NetAddress()) == addrKey { - bucketList.Remove(e) - break - } - } - } - - // Ok, need to move it to tried. - - // Remove from all new buckets. - // Record one of the buckets in question and call it the `first' - oldBucket := -1 - if !ka.tried { - if oldSubnetworkID == nil { - for i := range a.addrNewFullNodes { - // we check for existence so we can record the first one - if _, ok := a.addrNewFullNodes[i][addrKey]; ok { - delete(a.addrNewFullNodes[i], addrKey) - ka.refs-- - if oldBucket == -1 { - oldBucket = i - } - } - } - a.nNewFullNodes-- - } else { - for i := range a.addrNew[*oldSubnetworkID] { - // we check for existence so we can record the first one - if _, ok := a.addrNew[*oldSubnetworkID][i][addrKey]; ok { - delete(a.addrNew[*oldSubnetworkID][i], addrKey) - ka.refs-- - if oldBucket == -1 { - oldBucket = i - } - } - } - a.nNew[*oldSubnetworkID]-- - } - - if oldBucket == -1 { - // What? wasn't in a bucket after all.... Panic? - return - } - } - - // Room in this tried bucket? - if ka.subnetworkID == nil { - if a.nTriedFullNodes == 0 || a.addrTriedFullNodes[triedBucketIndex].Len() < triedBucketSize { - ka.tried = true - a.updateAddrTried(triedBucketIndex, ka) - a.nTriedFullNodes++ - return - } - } else if a.nTried[*ka.subnetworkID] == 0 || a.addrTried[*ka.subnetworkID][triedBucketIndex].Len() < triedBucketSize { - ka.tried = true - a.updateAddrTried(triedBucketIndex, ka) - a.nTried[*ka.subnetworkID]++ - return - } - - // No room, we have to evict something else. 
- entry := a.pickTried(ka.subnetworkID, triedBucketIndex) - rmka := entry.Value.(*KnownAddress) - - // First bucket it would have been put in. - newBucket := a.getNewBucket(rmka.na, rmka.srcAddr) - - // If no room in the original bucket, we put it in a bucket we just - // freed up a space in. - if ka.subnetworkID == nil { - if len(a.addrNewFullNodes[newBucket]) >= newBucketSize { - if oldBucket == -1 { - // If addr was a tried bucket with updated subnetworkID - oldBucket will be equal to -1. - // In that case - find some non-full bucket. - // If no such bucket exists - throw rmka away - for newBucket := range a.addrNewFullNodes { - if len(a.addrNewFullNodes[newBucket]) < newBucketSize { - break - } - } - } else { - newBucket = oldBucket - } - } - } else if len(a.addrNew[*ka.subnetworkID][newBucket]) >= newBucketSize { - if len(a.addrNew[*ka.subnetworkID][newBucket]) >= newBucketSize { - if oldBucket == -1 { - // If addr was a tried bucket with updated subnetworkID - oldBucket will be equal to -1. - // In that case - find some non-full bucket. - // If no such bucket exists - throw rmka away - for newBucket := range a.addrNew[*ka.subnetworkID] { - if len(a.addrNew[*ka.subnetworkID][newBucket]) < newBucketSize { - break - } - } - } else { - newBucket = oldBucket - } - } - } - - // Replace with ka in list. - ka.tried = true - entry.Value = ka - - rmka.tried = false - rmka.refs++ - - // We don't touch a.nTried here since the number of tried stays the same - // but we decemented new above, raise it again since we're putting - // something back. - if ka.subnetworkID == nil { - a.nNewFullNodes++ - } else { - a.nNew[*ka.subnetworkID]++ - } - - rmkey := NetAddressKey(rmka.na) - log.Tracef("Replacing %s with %s in tried", rmkey, addrKey) - - // We made sure there is space here just above. - if ka.subnetworkID == nil { - a.addrNewFullNodes[newBucket][rmkey] = rmka - } else { - a.addrNew[*ka.subnetworkID][newBucket][rmkey] = rmka - } -} - -// AddLocalAddress adds na to the list of known local addresses to advertise -// with the given priority. -func (a *AddrManager) AddLocalAddress(na *wire.NetAddress, priority AddressPriority) error { - if !a.IsRoutable(na) { - return errors.Errorf("address %s is not routable", na.IP) - } - - a.lamtx.Lock() - defer a.lamtx.Unlock() - - key := NetAddressKey(na) - la, ok := a.localAddresses[key] - if !ok || la.score < priority { - if ok { - la.score = priority + 1 - } else { - a.localAddresses[key] = &localAddress{ - na: na, - score: priority, - } - } - } - return nil -} - -// getReachabilityFrom returns the relative reachability of the provided local -// address to the provided remote address. -func (a *AddrManager) getReachabilityFrom(localAddr, remoteAddr *wire.NetAddress) int { - const ( - Unreachable = 0 - Default = iota - Teredo - Ipv6Weak - Ipv4 - Ipv6Strong - Private - ) - - if !a.IsRoutable(remoteAddr) { - return Unreachable - } - - if IsRFC4380(remoteAddr) { - if !a.IsRoutable(localAddr) { - return Default - } - - if IsRFC4380(localAddr) { - return Teredo - } - - if IsIPv4(localAddr) { - return Ipv4 - } - - return Ipv6Weak - } - - if IsIPv4(remoteAddr) { - if a.IsRoutable(localAddr) && IsIPv4(localAddr) { - return Ipv4 - } - return Unreachable - } - - /* ipv6 */ - var tunnelled bool - // Is our v6 is tunnelled? 
- if IsRFC3964(localAddr) || IsRFC6052(localAddr) || IsRFC6145(localAddr) { - tunnelled = true - } - - if !a.IsRoutable(localAddr) { - return Default - } - - if IsRFC4380(localAddr) { - return Teredo - } - - if IsIPv4(localAddr) { - return Ipv4 - } - - if tunnelled { - // only prioritise ipv6 if we aren't tunnelling it. - return Ipv6Weak - } - - return Ipv6Strong -} - -// GetBestLocalAddress returns the most appropriate local address to use -// for the given remote address. -func (a *AddrManager) GetBestLocalAddress(remoteAddr *wire.NetAddress) *wire.NetAddress { - a.lamtx.Lock() - defer a.lamtx.Unlock() - - bestreach := 0 - var bestscore AddressPriority - var bestAddress *wire.NetAddress - for _, la := range a.localAddresses { - reach := a.getReachabilityFrom(la.na, remoteAddr) - if reach > bestreach || - (reach == bestreach && la.score > bestscore) { - bestreach = reach - bestscore = la.score - bestAddress = la.na - } - } - if bestAddress != nil { - log.Debugf("Suggesting address %s:%d for %s:%d", bestAddress.IP, - bestAddress.Port, remoteAddr.IP, remoteAddr.Port) - } else { - log.Debugf("No worthy address for %s:%d", remoteAddr.IP, - remoteAddr.Port) - - // Send something unroutable if nothing suitable. - var ip net.IP - if !IsIPv4(remoteAddr) { - ip = net.IPv6zero - } else { - ip = net.IPv4zero - } - services := wire.SFNodeNetwork | wire.SFNodeBloom - bestAddress = wire.NewNetAddressIPPort(ip, 0, services) - } - - return bestAddress -} diff --git a/blockdag/blocknode.go b/blockdag/blocknode.go index 6a32b9be6..998297728 100644 --- a/blockdag/blocknode.go +++ b/blockdag/blocknode.go @@ -224,7 +224,7 @@ func (node *blockNode) isGenesis() bool { } func (node *blockNode) finalityScore(dag *BlockDAG) uint64 { - return node.blueScore / uint64(dag.Params.FinalityInterval) + return node.blueScore / uint64(dag.FinalityInterval()) } // String returns a string that contains the block hash. 
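The finalityScore hunk above replaces the stored FinalityInterval parameter with a value derived from two durations (see the FinalityInterval method added to dag.go further down). A worked sketch with hypothetical parameter values; the concrete numbers live in dagconfig:

package dagsketch

import "time"

// finalityIntervalSketch mirrors dag.FinalityInterval(): the finality window
// in blocks is FinalityDuration / TargetTimePerBlock. For example, an assumed
// 24h finality duration at one block per second yields 86,400 blocks.
func finalityIntervalSketch(finalityDuration, targetTimePerBlock time.Duration) uint64 {
	return uint64(finalityDuration / targetTimePerBlock)
}

// finalityScoreSketch mirrors the updated blockNode.finalityScore.
func finalityScoreSketch(blueScore uint64, finalityDuration, targetTimePerBlock time.Duration) uint64 {
	return blueScore / finalityIntervalSketch(finalityDuration, targetTimePerBlock)
}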
diff --git a/blockdag/blockwindow_test.go b/blockdag/blockwindow_test.go index 540f89253..34bf95eb8 100644 --- a/blockdag/blockwindow_test.go +++ b/blockdag/blockwindow_test.go @@ -51,14 +51,14 @@ func TestBlueBlockWindow(t *testing.T) { expectedWindowWithGenesisPadding: []string{"B", "A", "A", "A", "A", "A", "A", "A", "A", "A"}, }, { - parents: []string{"C", "D"}, + parents: []string{"D", "C"}, id: "E", - expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"}, }, { - parents: []string{"C", "D"}, + parents: []string{"D", "C"}, id: "F", - expectedWindowWithGenesisPadding: []string{"C", "D", "B", "A", "A", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"D", "C", "B", "A", "A", "A", "A", "A", "A", "A"}, }, { parents: []string{"A"}, @@ -73,37 +73,37 @@ func TestBlueBlockWindow(t *testing.T) { { parents: []string{"H", "F"}, id: "I", - expectedWindowWithGenesisPadding: []string{"F", "C", "D", "B", "A", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"F", "D", "C", "B", "A", "A", "A", "A", "A", "A"}, }, { parents: []string{"I"}, id: "J", - expectedWindowWithGenesisPadding: []string{"I", "F", "C", "D", "B", "A", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"I", "F", "D", "C", "B", "A", "A", "A", "A", "A"}, }, { parents: []string{"J"}, id: "K", - expectedWindowWithGenesisPadding: []string{"J", "I", "F", "C", "D", "B", "A", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"J", "I", "F", "D", "C", "B", "A", "A", "A", "A"}, }, { parents: []string{"K"}, id: "L", - expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "C", "D", "B", "A", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"K", "J", "I", "F", "D", "C", "B", "A", "A", "A"}, }, { parents: []string{"L"}, id: "M", - expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "C", "D", "B", "A", "A"}, + expectedWindowWithGenesisPadding: []string{"L", "K", "J", "I", "F", "D", "C", "B", "A", "A"}, }, { parents: []string{"M"}, id: "N", - expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "C", "D", "B", "A"}, + expectedWindowWithGenesisPadding: []string{"M", "L", "K", "J", "I", "F", "D", "C", "B", "A"}, }, { parents: []string{"N"}, id: "O", - expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "B"}, + expectedWindowWithGenesisPadding: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "B"}, }, } diff --git a/blockdag/common_test.go b/blockdag/common_test.go index 232d33332..6180c9cf6 100644 --- a/blockdag/common_test.go +++ b/blockdag/common_test.go @@ -12,7 +12,6 @@ import ( "path/filepath" "strings" "testing" - "time" "github.com/kaspanet/kaspad/util/mstime" @@ -97,11 +96,9 @@ func (dag *BlockDAG) TestSetCoinbaseMaturity(maturity uint64) { // use of it. func newTestDAG(params *dagconfig.Params) *BlockDAG { index := newBlockIndex(params) - targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second) dag := &BlockDAG{ Params: params, timeSource: NewTimeSource(), - targetTimePerBlock: targetTimePerBlock, difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize, TimestampDeviationTolerance: params.TimestampDeviationTolerance, powMaxBits: util.BigToCompact(params.PowMax), diff --git a/blockdag/dag.go b/blockdag/dag.go index ad101b2a0..622ce3d41 100644 --- a/blockdag/dag.go +++ b/blockdag/dag.go @@ -32,7 +32,9 @@ const ( // queued. 
	maxOrphanBlocks = 100

-	isDAGCurrentMaxDiff = 12 * time.Hour
+	// isDAGCurrentMaxDiff is the number of blocks from the network tips (estimated by timestamps) for the DAG
+	// to be considered not synced
+	isDAGCurrentMaxDiff = 40_000
 )

 // orphanBlock represents a block that we don't yet have the parent for. It
@@ -74,7 +76,6 @@ type BlockDAG struct {
 	// parameters. They are also set when the instance is created and
 	// can't be changed afterwards, so there is no need to protect them with
 	// a separate mutex.
-	targetTimePerBlock             int64 // The target delay between blocks (in seconds)
 	difficultyAdjustmentWindowSize uint64
 	TimestampDeviationTolerance    uint64

@@ -177,7 +178,6 @@ func New(config *Config) (*BlockDAG, error) {
 	}

 	params := config.DAGParams
-	targetTimePerBlock := int64(params.TargetTimePerBlock / time.Second)

 	index := newBlockIndex(params)
 	dag := &BlockDAG{
@@ -186,7 +186,6 @@ func New(config *Config) (*BlockDAG, error) {
 		timeSource:                     config.TimeSource,
 		sigCache:                       config.SigCache,
 		indexManager:                   config.IndexManager,
-		targetTimePerBlock:             targetTimePerBlock,
 		difficultyAdjustmentWindowSize: params.DifficultyAdjustmentWindowSize,
 		TimestampDeviationTolerance:    params.TimestampDeviationTolerance,
 		powMaxBits:                     util.BigToCompact(params.PowMax),
@@ -929,6 +928,11 @@ func (dag *BlockDAG) isInSelectedParentChainOf(node *blockNode, other *blockNode
 	return dag.reachabilityTree.isReachabilityTreeAncestorOf(node, other)
 }

+// FinalityInterval is the number of blocks in the finality window of the DAG.
+func (dag *BlockDAG) FinalityInterval() uint64 {
+	return uint64(dag.Params.FinalityDuration / dag.Params.TargetTimePerBlock)
+}
+
 // checkFinalityViolation checks the new block does not violate the finality rules
 // specifically - the new block selectedParent chain should contain the old finality point.
func (dag *BlockDAG) checkFinalityViolation(newNode *blockNode) error { @@ -991,7 +995,7 @@ func (dag *BlockDAG) finalizeNodesBelowFinalityPoint(deleteDiffData bool) { } var nodesToDelete []*blockNode if deleteDiffData { - nodesToDelete = make([]*blockNode, 0, dag.Params.FinalityInterval) + nodesToDelete = make([]*blockNode, 0, dag.FinalityInterval()) } for len(queue) > 0 { var current *blockNode @@ -1448,7 +1452,7 @@ func (dag *BlockDAG) isSynced() bool { dagTimestamp = selectedTip.timestamp } dagTime := mstime.UnixMilliseconds(dagTimestamp) - return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff + return dag.Now().Sub(dagTime) <= isDAGCurrentMaxDiff*dag.Params.TargetTimePerBlock } // Now returns the adjusted time according to diff --git a/blockdag/dag_test.go b/blockdag/dag_test.go index ad6ccc750..dbb9ac4d1 100644 --- a/blockdag/dag_test.go +++ b/blockdag/dag_test.go @@ -957,7 +957,7 @@ func testFinalizeNodesBelowFinalityPoint(t *testing.T, deleteDiffData bool) { flushUTXODiffStore() return node } - finalityInterval := dag.Params.FinalityInterval + finalityInterval := dag.FinalityInterval() nodes := make([]*blockNode, 0, finalityInterval) currentNode := dag.genesis nodes = append(nodes, currentNode) @@ -1133,7 +1133,7 @@ func TestIsDAGCurrentMaxDiff(t *testing.T) { &dagconfig.SimnetParams, } for _, params := range netParams { - if params.TargetTimePerBlock*time.Duration(params.FinalityInterval) < isDAGCurrentMaxDiff { + if params.FinalityDuration < isDAGCurrentMaxDiff*params.TargetTimePerBlock { t.Errorf("in %s, a DAG can be considered current even if it's below the finality point", params.Name) } } diff --git a/blockdag/external_dag_test.go b/blockdag/external_dag_test.go index 9f4927c77..8f4eb65c5 100644 --- a/blockdag/external_dag_test.go +++ b/blockdag/external_dag_test.go @@ -41,7 +41,7 @@ import ( func TestFinality(t *testing.T) { params := dagconfig.SimnetParams params.K = 1 - params.FinalityInterval = 100 + params.FinalityDuration = 100 * params.TargetTimePerBlock dag, teardownFunc, err := blockdag.DAGSetup("TestFinality", true, blockdag.Config{ DAGParams: ¶ms, }) @@ -75,7 +75,7 @@ func TestFinality(t *testing.T) { currentNode := genesis // First we build a chain of params.FinalityInterval blocks for future use - for i := uint64(0); i < params.FinalityInterval; i++ { + for i := uint64(0); i < dag.FinalityInterval(); i++ { currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()}) if err != nil { t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err) @@ -87,7 +87,7 @@ func TestFinality(t *testing.T) { // Now we build a new chain of 2 * params.FinalityInterval blocks, pointed to genesis, and // we expect the block with height 1 * params.FinalityInterval to be the last finality point currentNode = genesis - for i := uint64(0); i < params.FinalityInterval; i++ { + for i := uint64(0); i < dag.FinalityInterval(); i++ { currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()}) if err != nil { t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err) @@ -96,7 +96,7 @@ func TestFinality(t *testing.T) { expectedFinalityPoint := currentNode - for i := uint64(0); i < params.FinalityInterval; i++ { + for i := uint64(0); i < dag.FinalityInterval(); i++ { currentNode, err = buildNodeToDag([]*daghash.Hash{currentNode.Hash()}) if err != nil { t.Fatalf("TestFinality: buildNodeToDag unexpectedly returned an error: %v", err) @@ -176,9 +176,19 @@ func TestFinalityInterval(t *testing.T) { &dagconfig.SimnetParams, } for _, params 
:= range netParams { - if params.FinalityInterval > wire.MaxInvPerMsg { - t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name) - } + func() { + dag, teardownFunc, err := blockdag.DAGSetup("TestFinalityInterval", true, blockdag.Config{ + DAGParams: params, + }) + if err != nil { + t.Fatalf("Failed to setup dag instance for %s: %v", params.Name, err) + } + defer teardownFunc() + + if dag.FinalityInterval() > wire.MaxInvPerMsg { + t.Errorf("FinalityInterval in %s should be lower or equal to wire.MaxInvPerMsg", params.Name) + } + }() } } diff --git a/blockdag/ghostdag_test.go b/blockdag/ghostdag_test.go index 31e74380e..57ea5bb2e 100644 --- a/blockdag/ghostdag_test.go +++ b/blockdag/ghostdag_test.go @@ -34,7 +34,7 @@ func TestGHOSTDAG(t *testing.T) { }{ { k: 3, - expectedReds: []string{"F", "G", "H", "I", "O", "Q"}, + expectedReds: []string{"F", "G", "H", "I", "N", "P"}, dagData: []*testBlockData{ { parents: []string{"A"}, @@ -167,7 +167,7 @@ func TestGHOSTDAG(t *testing.T) { id: "T", expectedScore: 13, expectedSelectedParent: "S", - expectedBlues: []string{"S", "P", "N"}, + expectedBlues: []string{"S", "O", "Q"}, }, }, }, diff --git a/blockdag/mining.go b/blockdag/mining.go index 3b8ac9595..d0c33feeb 100644 --- a/blockdag/mining.go +++ b/blockdag/mining.go @@ -106,7 +106,7 @@ func (dag *BlockDAG) NextCoinbaseFromAddress(payToAddress util.Address, extraDat // the median timestamp of the last several blocks per the DAG consensus // rules. func (dag *BlockDAG) NextBlockMinimumTime() mstime.Time { - return dag.CalcPastMedianTime().Add(time.Second) + return dag.CalcPastMedianTime().Add(time.Millisecond) } // NextBlockTime returns a valid block time for the diff --git a/blockdag/process.go b/blockdag/process.go index 704fde44e..5e1815646 100644 --- a/blockdag/process.go +++ b/blockdag/process.go @@ -162,8 +162,7 @@ func (dag *BlockDAG) processBlockNoLock(block *util.Block, flags BehaviorFlags) // The block must not already exist in the DAG. if dag.IsInDAG(blockHash) && !wasBlockStored { - str := fmt.Sprintf("already have block %s", blockHash) - return false, false, ruleError(ErrDuplicateBlock, str) + return false, false, errors.Errorf("already have block %s", blockHash) } // The block must not already exist as an orphan. diff --git a/blockdag/process_test.go b/blockdag/process_test.go index 71ea6a9b1..95baf9236 100644 --- a/blockdag/process_test.go +++ b/blockdag/process_test.go @@ -101,7 +101,7 @@ func TestProcessDelayedBlocks(t *testing.T) { t.Fatalf("error in PrepareBlockForTest: %s", err) } - blockDelay := time.Duration(dag1.Params.TimestampDeviationTolerance*uint64(dag1.targetTimePerBlock)+5) * time.Second + blockDelay := time.Duration(dag1.Params.TimestampDeviationTolerance)*dag1.Params.TargetTimePerBlock + 5*time.Second delayedBlock.Header.Timestamp = initialTime.Add(blockDelay) isOrphan, isDelayed, err := dag1.ProcessBlock(util.NewBlock(delayedBlock), BFNoPoWCheck) @@ -203,9 +203,9 @@ func TestProcessDelayedBlocks(t *testing.T) { } // We advance the clock to the point where delayedBlock timestamp is valid. - deviationTolerance := int64(dag2.TimestampDeviationTolerance) * dag2.targetTimePerBlock + deviationTolerance := time.Duration(dag2.TimestampDeviationTolerance) * dag2.Params.TargetTimePerBlock timeUntilDelayedBlockIsValid := delayedBlock.Header.Timestamp. - Add(-time.Duration(deviationTolerance)*time.Second). + Add(-deviationTolerance). 
Sub(dag2.Now()) + time.Second dag2.timeSource = newFakeTimeSource(initialTime.Add(timeUntilDelayedBlockIsValid)) diff --git a/blockdag/validate.go b/blockdag/validate.go index 1e1062e0c..22f19f730 100644 --- a/blockdag/validate.go +++ b/blockdag/validate.go @@ -417,8 +417,7 @@ func (dag *BlockDAG) checkBlockHeaderSanity(block *util.Block, flags BehaviorFla // the duration of time that should be waited before the block becomes valid. // This check needs to be last as it does not return an error but rather marks the // header as delayed (and valid). - maxTimestamp := dag.Now().Add(time.Second * - time.Duration(int64(dag.TimestampDeviationTolerance)*dag.targetTimePerBlock)) + maxTimestamp := dag.Now().Add(time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock) if header.Timestamp.After(maxTimestamp) { return header.Timestamp.Sub(maxTimestamp), nil } diff --git a/blockdag/validate_test.go b/blockdag/validate_test.go index 8fcfcb220..13f160e57 100644 --- a/blockdag/validate_test.go +++ b/blockdag/validate_test.go @@ -479,7 +479,7 @@ func TestCheckBlockSanity(t *testing.T) { blockInTheFuture := Block100000 expectedDelay := 10 * time.Second - deviationTolerance := time.Duration(dag.TimestampDeviationTolerance*uint64(dag.targetTimePerBlock)) * time.Second + deviationTolerance := time.Duration(dag.TimestampDeviationTolerance) * dag.Params.TargetTimePerBlock blockInTheFuture.Header.Timestamp = dag.Now().Add(deviationTolerance + expectedDelay) delay, err = dag.checkBlockSanity(util.NewBlock(&blockInTheFuture), BFNoPoWCheck) if err != nil { diff --git a/connmanager/connmanager.go b/connmanager/connmanager.go index ef2399f80..65fceebb1 100644 --- a/connmanager/connmanager.go +++ b/connmanager/connmanager.go @@ -1,11 +1,11 @@ package connmanager import ( + "github.com/kaspanet/kaspad/addressmanager" "sync" "sync/atomic" "time" - "github.com/kaspanet/kaspad/addrmgr" "github.com/kaspanet/kaspad/netadapter" "github.com/kaspanet/kaspad/config" @@ -24,7 +24,7 @@ type connectionRequest struct { type ConnectionManager struct { cfg *config.Config netAdapter *netadapter.NetAdapter - addressManager *addrmgr.AddrManager + addressManager *addressmanager.AddressManager activeRequested map[string]*connectionRequest pendingRequested map[string]*connectionRequest @@ -44,7 +44,7 @@ type ConnectionManager struct { } // New instantiates a new instance of a ConnectionManager -func New(cfg *config.Config, netAdapter *netadapter.NetAdapter, addressManager *addrmgr.AddrManager) (*ConnectionManager, error) { +func New(cfg *config.Config, netAdapter *netadapter.NetAdapter, addressManager *addressmanager.AddressManager) (*ConnectionManager, error) { c := &ConnectionManager{ cfg: cfg, netAdapter: netAdapter, diff --git a/dagconfig/genesis.go b/dagconfig/genesis.go index e620bfae5..0a266095c 100644 --- a/dagconfig/genesis.go +++ b/dagconfig/genesis.go @@ -28,10 +28,10 @@ var genesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, genesisTxOuts // genesisHash is the hash of the first block in the block DAG for the main // network (genesis block). 
var genesisHash = daghash.Hash{ - 0xed, 0xac, 0xec, 0x9a, 0x9a, 0xc7, 0x24, 0x59, - 0x3a, 0x02, 0x64, 0xcd, 0x6b, 0x09, 0x63, 0x48, - 0x5a, 0xb5, 0x6f, 0xad, 0x92, 0x26, 0x5b, 0x99, - 0xb8, 0xe0, 0x25, 0x86, 0xba, 0x38, 0x87, 0x52, + 0xfa, 0x00, 0xbd, 0xcb, 0x46, 0x74, 0xc5, 0xdb, + 0xf7, 0x63, 0xcb, 0x78, 0x7a, 0x94, 0xc5, 0xbf, + 0xd4, 0x81, 0xd3, 0x52, 0x2d, 0x79, 0xac, 0x57, + 0x73, 0xe6, 0x14, 0x7e, 0x15, 0xef, 0x85, 0x27, } // genesisMerkleRoot is the hash of the first transaction in the genesis block @@ -52,9 +52,9 @@ var genesisBlock = wire.MsgBlock{ HashMerkleRoot: &genesisMerkleRoot, AcceptedIDMerkleRoot: &daghash.Hash{}, UTXOCommitment: &daghash.ZeroHash, - Timestamp: mstime.UnixMilliseconds(0x17315ed0f99), + Timestamp: mstime.UnixMilliseconds(0x1730a81bdb4), Bits: 0x207fffff, - Nonce: 0x0, + Nonce: 0x1, }, Transactions: []*wire.MsgTx{genesisCoinbaseTx}, } @@ -126,10 +126,10 @@ var regtestGenesisCoinbaseTx = wire.NewSubnetworkMsgTx(1, []*wire.TxIn{}, regtes // devGenesisHash is the hash of the first block in the block DAG for the development // network (genesis block). var regtestGenesisHash = daghash.Hash{ - 0x66, 0xed, 0x54, 0xe6, 0x33, 0xac, 0x61, 0x0d, - 0xf3, 0x9d, 0x1f, 0xbb, 0x56, 0xec, 0x79, 0x18, - 0x4a, 0x1f, 0xb3, 0xa4, 0xf7, 0xf6, 0x7d, 0x5e, - 0xd8, 0xac, 0x9f, 0x35, 0x5d, 0x36, 0x00, 0x00, + 0xda, 0x23, 0x61, 0x5e, 0xf6, 0x2a, 0x95, 0x27, + 0x7f, 0x5a, 0x40, 0xd5, 0x91, 0x97, 0x1c, 0xef, + 0xd5, 0x86, 0xac, 0xac, 0x82, 0xb3, 0xc9, 0x43, + 0xd3, 0x49, 0x5f, 0x7e, 0x93, 0x0b, 0x35, 0x2d, } // regtestGenesisMerkleRoot is the hash of the first transaction in the genesis block @@ -150,9 +150,9 @@ var regtestGenesisBlock = wire.MsgBlock{ HashMerkleRoot: ®testGenesisMerkleRoot, AcceptedIDMerkleRoot: &daghash.Hash{}, UTXOCommitment: &daghash.ZeroHash, - Timestamp: mstime.UnixMilliseconds(0x1733106fd15), - Bits: 0x1e7fffff, - Nonce: 0x8184, + Timestamp: mstime.UnixMilliseconds(0x1730a958ac4), + Bits: 0x207fffff, + Nonce: 0x0, }, Transactions: []*wire.MsgTx{regtestGenesisCoinbaseTx}, } diff --git a/dagconfig/genesis_test.go b/dagconfig/genesis_test.go index 103495815..367c1a062 100644 --- a/dagconfig/genesis_test.go +++ b/dagconfig/genesis_test.go @@ -154,8 +154,8 @@ var genesisBlockBytes = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0xbd, 0x81, 0x0a, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x41, 0xe6, 0x78, 0x1d, 0xf7, 0xb3, 0x39, 0x66, 0x4d, 0x1a, 0x03, @@ -174,8 +174,8 @@ var regtestGenesisBlockBytes = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0xfd, 0x06, 0x31, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0x1e, 0x84, 0x81, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x8a, 0x95, 0x0a, 0x73, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xc4, 0x87, 0x77, 0xf2, 0xe7, 0x5d, 0xf7, 0xff, 0x2d, 0xbb, 0xb6, diff --git a/dagconfig/params.go b/dagconfig/params.go index 46a99297f..fd105bb0d 100644 --- a/dagconfig/params.go +++ b/dagconfig/params.go @@ -53,7 +53,6 @@ const ( timestampDeviationTolerance = 132 finalityDuration = 24 * time.Hour targetTimePerBlock = 1 * time.Second - finalityInterval = uint64(finalityDuration / targetTimePerBlock) ) // ConsensusDeployment defines details related to a specific consensus rule @@ -136,8 +135,8 @@ type Params struct { // block. TargetTimePerBlock time.Duration - // FinalityInterval is the interval that determines the finality window of the DAG. - FinalityInterval uint64 + // FinalityDuration is the duration of the finality window. + FinalityDuration time.Duration // TimestampDeviationTolerance is the maximum offset a block timestamp // is allowed to be in the future before it gets delayed @@ -203,7 +202,7 @@ var MainnetParams = Params{ BlockCoinbaseMaturity: 100, SubsidyReductionInterval: 210000, TargetTimePerBlock: targetTimePerBlock, - FinalityInterval: finalityInterval, + FinalityDuration: finalityDuration, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, TimestampDeviationTolerance: timestampDeviationTolerance, @@ -256,7 +255,7 @@ var RegressionNetParams = Params{ BlockCoinbaseMaturity: 100, SubsidyReductionInterval: 150, TargetTimePerBlock: targetTimePerBlock, - FinalityInterval: finalityInterval, + FinalityDuration: finalityDuration, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, TimestampDeviationTolerance: timestampDeviationTolerance, @@ -307,7 +306,7 @@ var TestnetParams = Params{ BlockCoinbaseMaturity: 100, SubsidyReductionInterval: 210000, TargetTimePerBlock: targetTimePerBlock, - FinalityInterval: finalityInterval, + FinalityDuration: finalityDuration, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, TimestampDeviationTolerance: timestampDeviationTolerance, @@ -363,8 +362,8 @@ var SimnetParams = Params{ PowMax: simnetPowMax, BlockCoinbaseMaturity: 100, SubsidyReductionInterval: 210000, - TargetTimePerBlock: targetTimePerBlock, - FinalityInterval: finalityInterval, + TargetTimePerBlock: time.Millisecond, + FinalityDuration: time.Minute, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, TimestampDeviationTolerance: timestampDeviationTolerance, @@ -413,7 +412,7 @@ var DevnetParams = Params{ BlockCoinbaseMaturity: 100, SubsidyReductionInterval: 210000, TargetTimePerBlock: targetTimePerBlock, - FinalityInterval: finalityInterval, + FinalityDuration: finalityDuration, DifficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, TimestampDeviationTolerance: timestampDeviationTolerance, diff --git a/kaspad.go b/kaspad.go index 1f399ecbd..649e8579c 100644 --- a/kaspad.go +++ b/kaspad.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/kaspanet/kaspad/addressmanager" "sync/atomic" 
"github.com/kaspanet/kaspad/dbaccess" @@ -11,8 +12,6 @@ import ( "github.com/kaspanet/kaspad/connmanager" - "github.com/kaspanet/kaspad/addrmgr" - "github.com/kaspanet/kaspad/netadapter" "github.com/kaspanet/kaspad/util/panics" @@ -33,7 +32,7 @@ import ( type kaspad struct { cfg *config.Config rpcServer *rpc.Server - addressManager *addrmgr.AddrManager + addressManager *addressmanager.AddressManager protocolManager *protocol.Manager connectionManager *connmanager.ConnectionManager @@ -123,7 +122,7 @@ func newKaspad(cfg *config.Config, databaseContext *dbaccess.DatabaseContext, in if err != nil { return nil, err } - addressManager := addrmgr.New(cfg, databaseContext) + addressManager := addressmanager.New(cfg, databaseContext) connectionManager, err := connmanager.New(cfg, netAdapter, addressManager) if err != nil { @@ -203,7 +202,7 @@ func setupMempool(cfg *config.Config, dag *blockdag.BlockDAG, sigCache *txscript func setupRPC(cfg *config.Config, dag *blockdag.BlockDAG, txMempool *mempool.TxPool, sigCache *txscript.SigCache, acceptanceIndex *indexers.AcceptanceIndex, connectionManager *connmanager.ConnectionManager, - addressManager *addrmgr.AddrManager, protocolManager *protocol.Manager) (*rpc.Server, error) { + addressManager *addressmanager.AddressManager, protocolManager *protocol.Manager) (*rpc.Server, error) { if !cfg.DisableRPC { policy := mining.Policy{ diff --git a/protocol/flowcontext/addresses.go b/protocol/flowcontext/addresses.go index 3e27b3978..bd0a94a98 100644 --- a/protocol/flowcontext/addresses.go +++ b/protocol/flowcontext/addresses.go @@ -1,8 +1,10 @@ package flowcontext -import "github.com/kaspanet/kaspad/addrmgr" +import ( + "github.com/kaspanet/kaspad/addressmanager" +) // AddressManager returns the address manager associated to the flow context. -func (f *FlowContext) AddressManager() *addrmgr.AddrManager { +func (f *FlowContext) AddressManager() *addressmanager.AddressManager { return f.addressManager } diff --git a/protocol/flowcontext/flow_context.go b/protocol/flowcontext/flow_context.go index 3a0059730..296c8ed55 100644 --- a/protocol/flowcontext/flow_context.go +++ b/protocol/flowcontext/flow_context.go @@ -1,7 +1,7 @@ package flowcontext import ( - "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/addressmanager" "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/connmanager" @@ -24,7 +24,7 @@ type FlowContext struct { netAdapter *netadapter.NetAdapter txPool *mempool.TxPool dag *blockdag.BlockDAG - addressManager *addrmgr.AddrManager + addressManager *addressmanager.AddressManager connectionManager *connmanager.ConnectionManager transactionsToRebroadcastLock sync.Mutex @@ -43,7 +43,7 @@ type FlowContext struct { } // New returns a new instance of FlowContext. 
-func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addrmgr.AddrManager,
+func New(cfg *config.Config, dag *blockdag.BlockDAG, addressManager *addressmanager.AddressManager,
 	txPool *mempool.TxPool, netAdapter *netadapter.NetAdapter,
 	connectionManager *connmanager.ConnectionManager) *FlowContext {
diff --git a/protocol/flows/addressexchange/receiveaddresses.go b/protocol/flows/addressexchange/receiveaddresses.go
index a9b2e602a..201e9a02d 100644
--- a/protocol/flows/addressexchange/receiveaddresses.go
+++ b/protocol/flows/addressexchange/receiveaddresses.go
@@ -1,7 +1,7 @@
 package addressexchange
 
 import (
-	"github.com/kaspanet/kaspad/addrmgr"
+	"github.com/kaspanet/kaspad/addressmanager"
 	"github.com/kaspanet/kaspad/config"
 	"github.com/kaspanet/kaspad/netadapter/router"
 	"github.com/kaspanet/kaspad/protocol/common"
@@ -13,7 +13,7 @@ import (
 // ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow.
 type ReceiveAddressesContext interface {
 	Config() *config.Config
-	AddressManager() *addrmgr.AddrManager
+	AddressManager() *addressmanager.AddressManager
 }
 
 // ReceiveAddresses asks a peer for more addresses if needed.
@@ -37,8 +37,8 @@ func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Rou
 	}
 
 	msgAddresses := message.(*wire.MsgAddresses)
-	if len(msgAddresses.AddrList) > addrmgr.GetAddressesMax {
-		return protocolerrors.Errorf(true, "address count excceeded %d", addrmgr.GetAddressesMax)
+	if len(msgAddresses.AddrList) > addressmanager.GetAddressesMax {
+		return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax)
 	}
 
 	if msgAddresses.IncludeAllSubnetworks {
diff --git a/protocol/flows/addressexchange/sendaddresses.go b/protocol/flows/addressexchange/sendaddresses.go
index 1490a3d2f..cff66ecbf 100644
--- a/protocol/flows/addressexchange/sendaddresses.go
+++ b/protocol/flows/addressexchange/sendaddresses.go
@@ -1,7 +1,7 @@
 package addressexchange
 
 import (
-	"github.com/kaspanet/kaspad/addrmgr"
+	"github.com/kaspanet/kaspad/addressmanager"
 	"github.com/kaspanet/kaspad/netadapter/router"
 	"github.com/kaspanet/kaspad/wire"
 	"math/rand"
@@ -9,7 +9,7 @@ import (
 
 // SendAddressesContext is the interface for the context needed for the SendAddresses flow.
 type SendAddressesContext interface {
-	AddressManager() *addrmgr.AddrManager
+	AddressManager() *addressmanager.AddressManager
 }
 
 // SendAddresses sends addresses to a peer that requests it.
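Note on the other thread running through this diff: besides the addrmgr to addressmanager rename, the block-count parameter Params.FinalityInterval is replaced by the wall-clock Params.FinalityDuration, and callers switch to a dag.FinalityInterval() accessor whose definition is not part of this excerpt. Judging by the constant deleted from dagconfig/params.go, finalityInterval = uint64(finalityDuration / targetTimePerBlock), the accessor presumably recovers the block count by dividing the two durations. A minimal sketch under that assumption, not the actual method body from the commit:

```go
// FinalityInterval returns the finality window measured in blocks.
// Sketch only: this mirrors the constant removed from dagconfig/params.go,
// finalityInterval = uint64(finalityDuration / targetTimePerBlock); the
// real method body is not shown in this excerpt.
func (dag *BlockDAG) FinalityInterval() uint64 {
	// Dividing one time.Duration by another yields a dimensionless count:
	// the number of target-time block slots that fit in the window.
	return uint64(dag.Params.FinalityDuration / dag.Params.TargetTimePerBlock)
}
```

Keeping the parameter as a duration lets the finality window track wall-clock time across networks with very different block rates: SimnetParams, for instance, now pairs TargetTimePerBlock: time.Millisecond with FinalityDuration: time.Minute, which yields a 60,000-block window under the sketch above, and isSynced and TestIsDAGCurrentMaxDiff are rewritten in the same duration terms.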
diff --git a/protocol/flows/handshake/handshake.go b/protocol/flows/handshake/handshake.go index cdee98032..4087b496a 100644 --- a/protocol/flows/handshake/handshake.go +++ b/protocol/flows/handshake/handshake.go @@ -1,12 +1,12 @@ package handshake import ( + "github.com/kaspanet/kaspad/addressmanager" "github.com/kaspanet/kaspad/protocol/common" "github.com/kaspanet/kaspad/protocol/protocolerrors" "sync" "sync/atomic" - "github.com/kaspanet/kaspad/addrmgr" "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/netadapter" @@ -23,7 +23,7 @@ type HandleHandshakeContext interface { Config() *config.Config NetAdapter() *netadapter.NetAdapter DAG() *blockdag.BlockDAG - AddressManager() *addrmgr.AddrManager + AddressManager() *addressmanager.AddressManager StartIBDIfRequired() AddToPeers(peer *peerpkg.Peer) error } diff --git a/protocol/manager.go b/protocol/manager.go index 68eb9e0e5..347bb64c3 100644 --- a/protocol/manager.go +++ b/protocol/manager.go @@ -1,7 +1,7 @@ package protocol import ( - "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/addressmanager" "github.com/kaspanet/kaspad/blockdag" "github.com/kaspanet/kaspad/config" "github.com/kaspanet/kaspad/connmanager" @@ -21,7 +21,7 @@ type Manager struct { // NewManager creates a new instance of the p2p protocol manager func NewManager(cfg *config.Config, dag *blockdag.BlockDAG, - addressManager *addrmgr.AddrManager, txPool *mempool.TxPool, + addressManager *addressmanager.AddressManager, txPool *mempool.TxPool, connectionManager *connmanager.ConnectionManager) (*Manager, error) { netAdapter, err := netadapter.NewNetAdapter(cfg) diff --git a/rpc/handle_get_peer_addresses.go b/rpc/handle_get_peer_addresses.go index 9863e88a8..b8dece8ca 100644 --- a/rpc/handle_get_peer_addresses.go +++ b/rpc/handle_get_peer_addresses.go @@ -1,6 +1,9 @@ package rpc -import "github.com/kaspanet/kaspad/rpc/model" +import ( + "github.com/kaspanet/kaspad/addressmanager" + "github.com/kaspanet/kaspad/rpc/model" +) // handleGetPeerAddresses handles getPeerAddresses commands. 
func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { @@ -21,8 +24,8 @@ func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{ for i, addr := range peersState.Addresses { rpcPeersState.Addresses[i] = &model.GetPeerAddressesKnownAddressResult{ - Addr: addr.Addr, - Src: addr.Src, + Addr: string(addr.Address), + Src: string(addr.SourceAddress), SubnetworkID: addr.SubnetworkID, Attempts: addr.Attempts, TimeStamp: addr.TimeStamp, @@ -31,27 +34,35 @@ func handleGetPeerAddresses(s *Server, cmd interface{}, closeChan <-chan struct{ } } - for subnetworkID, bucket := range peersState.NewBuckets { + for subnetworkID, bucket := range peersState.SubnetworkNewAddressBucketArrays { rpcPeersState.NewBuckets[subnetworkID] = &model.GetPeerAddressesNewBucketResult{} for i, addr := range bucket { - rpcPeersState.NewBuckets[subnetworkID][i] = addr + rpcPeersState.NewBuckets[subnetworkID][i] = convertAddressKeySliceToString(addr) } } - for i, addr := range peersState.NewBucketFullNodes { - rpcPeersState.NewBucketFullNodes[i] = addr + for i, addr := range peersState.FullNodeNewAddressBucketArray { + rpcPeersState.NewBucketFullNodes[i] = convertAddressKeySliceToString(addr) } - for subnetworkID, bucket := range peersState.TriedBuckets { + for subnetworkID, bucket := range peersState.SubnetworkTriedAddressBucketArrays { rpcPeersState.TriedBuckets[subnetworkID] = &model.GetPeerAddressesTriedBucketResult{} for i, addr := range bucket { - rpcPeersState.TriedBuckets[subnetworkID][i] = addr + rpcPeersState.TriedBuckets[subnetworkID][i] = convertAddressKeySliceToString(addr) } } - for i, addr := range peersState.TriedBucketFullNodes { - rpcPeersState.TriedBucketFullNodes[i] = addr + for i, addr := range peersState.FullNodeTriedAddressBucketArray { + rpcPeersState.TriedBucketFullNodes[i] = convertAddressKeySliceToString(addr) } return rpcPeersState, nil } + +func convertAddressKeySliceToString(addressKeys []addressmanager.AddressKey) []string { + strings := make([]string, len(addressKeys)) + for j, addr := range addressKeys { + strings[j] = string(addr) + } + return strings +} diff --git a/rpc/model/rpc_results.go b/rpc/model/rpc_results.go index f09027a91..c968a2aaf 100644 --- a/rpc/model/rpc_results.go +++ b/rpc/model/rpc_results.go @@ -6,7 +6,7 @@ package model import ( "encoding/json" - "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/addressmanager" ) // GetBlockHeaderVerboseResult models the data from the getblockheader command when @@ -240,10 +240,10 @@ type GetPeerAddressesKnownAddressResult struct { } // GetPeerAddressesNewBucketResult models a GetPeerAddressesResult new bucket. -type GetPeerAddressesNewBucketResult [addrmgr.NewBucketCount][]string +type GetPeerAddressesNewBucketResult [addressmanager.NewBucketCount][]string // GetPeerAddressesTriedBucketResult models a GetPeerAddressesResult tried bucket. -type GetPeerAddressesTriedBucketResult [addrmgr.TriedBucketCount][]string +type GetPeerAddressesTriedBucketResult [addressmanager.TriedBucketCount][]string // GetRawMempoolVerboseResult models the data returned from the getrawmempool // command when the verbose flag is set. 
When the verbose flag is not set, diff --git a/rpc/rpcserver.go b/rpc/rpcserver.go index 75c3ddb06..1ad1df259 100644 --- a/rpc/rpcserver.go +++ b/rpc/rpcserver.go @@ -12,7 +12,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "github.com/kaspanet/kaspad/addrmgr" + "github.com/kaspanet/kaspad/addressmanager" "github.com/kaspanet/kaspad/connmanager" "github.com/kaspanet/kaspad/protocol" "github.com/kaspanet/kaspad/util/mstime" @@ -168,7 +168,7 @@ type Server struct { acceptanceIndex *indexers.AcceptanceIndex blockTemplateGenerator *mining.BlkTmplGenerator connectionManager *connmanager.ConnectionManager - addressManager *addrmgr.AddrManager + addressManager *addressmanager.AddressManager protocolManager *protocol.Manager } @@ -694,7 +694,7 @@ func NewRPCServer( acceptanceIndex *indexers.AcceptanceIndex, blockTemplateGenerator *mining.BlkTmplGenerator, connectionManager *connmanager.ConnectionManager, - addressManager *addrmgr.AddrManager, + addressManager *addressmanager.AddressManager, protocolManager *protocol.Manager, ) (*Server, error) { // Setup listeners for the configured RPC listen addresses and