discovery: use leveled log

Xiang Li 2015-06-11 09:48:10 -07:00
parent f013a627a4
commit 2db8b53c4b
2 changed files with 16 additions and 15 deletions
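
The change swaps the standard library log package for a capnslog package logger in the discovery package, so the hand-written "discovery: " message prefix and log.Printf calls give way to leveled calls (Infof for routine detail, Noticef for milestones, Warningf for recoverable trouble, Errorf for failures). The snippet below is a minimal standalone sketch of that pattern, not code from etcd: the repo/package names, the messages, and the NewPrettyFormatter/SetGlobalLogLevel setup are illustrative assumptions.

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One logger per package; capnslog stamps every entry with the package name,
// so messages no longer need a hand-written "discovery:" prefix.
var plog = capnslog.NewPackageLogger("github.com/example/app", "demo")

func main() {
	// Route output to stderr and pick the minimum level that gets emitted.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("using proxy %q", "http://127.0.0.1:8080")                         // routine detail
	plog.Noticef("found peer %s in the cluster", "node1")                         // notable event
	plog.Warningf("couldn't resolve host %s during SRV discovery", "example.com") // recoverable problem
	plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") // hard failure
}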

View File

@@ -17,7 +17,6 @@ package discovery
import (
"errors"
"fmt"
"log"
"math"
"net/http"
"net/url"
@@ -31,9 +30,12 @@ import (
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/pkg/capnslog"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery")
ErrInvalidURL = errors.New("discovery: invalid URL")
ErrBadSizeKey = errors.New("discovery: size key is bad")
ErrSizeNotFound = errors.New("discovery: size key not found")
@@ -102,7 +104,7 @@ func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) {
return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
}
log.Printf("discovery: using proxy %q", proxyURL.String())
plog.Infof("using proxy %q", proxyURL.String())
return http.ProxyURL(proxyURL), nil
}
@@ -250,7 +252,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
func (d *discovery) logAndBackoffForRetry(step string) {
d.retries++
retryTime := time.Second * (0x1 << d.retries)
log.Println("discovery: during", step, "connection to", d.url, "timed out, retrying in", retryTime)
plog.Infoln("during", step, "connection to", d.url, "timed out, retrying in", retryTime)
d.clock.Sleep(retryTime)
}
@@ -284,15 +286,15 @@ func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*
copy(all, nodes)
for _, n := range all {
if path.Base(n.Key) == path.Base(d.selfKey()) {
log.Printf("discovery: found self %s in the cluster", path.Base(d.selfKey()))
plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
} else {
log.Printf("discovery: found peer %s in the cluster", path.Base(n.Key))
plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
}
}
// wait for others
for len(all) < size {
log.Printf("discovery: found %d peer(s), waiting for %d more", len(all), size-len(all))
plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
resp, err := w.Next(context.Background())
if err != nil {
if err == context.DeadlineExceeded {
@@ -300,10 +302,10 @@ func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*
}
return nil, err
}
log.Printf("discovery: found peer %s in the cluster", path.Base(resp.Node.Key))
plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
all = append(all, resp.Node)
}
log.Printf("discovery: found %d needed peer(s)", len(all))
plog.Noticef("found %d needed peer(s)", len(all))
return all, nil
}
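
A note on the retry pacing logged above: logAndBackoffForRetry bumps d.retries first and then sleeps for time.Second * (0x1 << d.retries), so the wait doubles on every timeout (2s after the first retry, then 4s, 8s, and so on). Below is a tiny standalone sketch of that arithmetic, with an illustrative helper name rather than etcd's own code.

package main

import (
	"fmt"
	"time"
)

// backoff mirrors the shift-based doubling used in logAndBackoffForRetry.
func backoff(retries uint) time.Duration {
	return time.Second * (0x1 << retries)
}

func main() {
	for r := uint(1); r <= 5; r++ {
		fmt.Printf("retry %d: wait %s\n", r, backoff(r)) // 2s, 4s, 8s, 16s, 32s
	}
}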

View File

@@ -16,7 +16,6 @@ package discovery
import (
"fmt"
"log"
"net"
"strings"
@@ -41,7 +40,7 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
for _, url := range apurls {
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
log.Printf("discovery: Couldn't resolve host %s during SRV discovery", url.Host)
plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host)
return "", "", err
}
tcpAPUrls = append(tcpAPUrls, tcpAddr.String())
@@ -57,7 +56,7 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
host := net.JoinHostPort(target, fmt.Sprintf("%d", srv.Port))
tcpAddr, err := resolveTCPAddr("tcp", host)
if err != nil {
log.Printf("discovery: Couldn't resolve host %s during SRV discovery", host)
plog.Warningf("couldn't resolve host %s during SRV discovery", host)
continue
}
n := ""
@@ -71,7 +70,7 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
tempName += 1
}
stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, host))
log.Printf("discovery: Got bootstrap from DNS for %s at %s%s", service, prefix, host)
plog.Noticef("got bootstrap from DNS for %s at %s%s", service, prefix, host)
}
return nil
}
@@ -79,16 +78,16 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
failCount := 0
err := updateNodeMap("etcd-server-ssl", "https://")
if err != nil {
log.Printf("discovery: Error querying DNS SRV records for _etcd-server-ssl %s", err)
plog.Warningf("error querying DNS SRV records for _etcd-server-ssl %s", err)
failCount += 1
}
err = updateNodeMap("etcd-server", "http://")
if err != nil {
log.Printf("discovery: Error querying DNS SRV records for _etcd-server %s", err)
plog.Warningf("discovery: error querying DNS SRV records for _etcd-server %s", err)
failCount += 1
}
if failCount == 2 {
log.Printf("discovery: SRV discovery failed: too many errors querying DNS SRV records")
plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records")
return "", "", err
}
return strings.Join(stringParts, ","), defaultToken, nil
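
For context on the second file: SRVGetCluster builds the bootstrap cluster string from DNS SRV records, querying _etcd-server-ssl._tcp.<domain> for https:// peers and _etcd-server._tcp.<domain> for http:// peers, and only gives up when both lookups fail. The sketch below shows that lookup-and-join pattern with only the standard library; the domain, the peerN= naming, and the helper function are illustrative stand-ins, not etcd's actual member-name matching.

package main

import (
	"fmt"
	"net"
	"strings"
)

// srvPeers resolves one SRV service and renders name=URL bootstrap entries.
func srvPeers(service, domain, scheme string) ([]string, error) {
	_, addrs, err := net.LookupSRV(service, "tcp", domain)
	if err != nil {
		return nil, err
	}
	peers := make([]string, 0, len(addrs))
	for i, srv := range addrs {
		host := net.JoinHostPort(strings.TrimSuffix(srv.Target, "."), fmt.Sprintf("%d", srv.Port))
		peers = append(peers, fmt.Sprintf("peer%d=%s%s", i, scheme, host))
	}
	return peers, nil
}

func main() {
	domain := "example.com" // stand-in for the cluster's DNS domain
	var parts []string
	failCount := 0
	if p, err := srvPeers("etcd-server-ssl", domain, "https://"); err != nil {
		failCount++
	} else {
		parts = append(parts, p...)
	}
	if p, err := srvPeers("etcd-server", domain, "http://"); err != nil {
		failCount++
	} else {
		parts = append(parts, p...)
	}
	if failCount == 2 {
		fmt.Println("SRV discovery failed: too many errors querying DNS SRV records")
		return
	}
	fmt.Println(strings.Join(parts, ","))
}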