Merge pull request #6486 from glevand/for-merge-arm64

Get tests working on ARM64
Xiang Li 2016-10-06 17:53:10 -07:00 committed by GitHub
commit f3cdfcdcf4
4 changed files with 54 additions and 32 deletions

@@ -49,51 +49,50 @@ Finished defragmenting etcd member[127.0.0.1:2379]
## Space quota
The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. After freeing enough space in the keyspace, the alarm can be disarmed and the cluster will resume normal operation.
The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. Only after freeing enough space in the keyspace, defragmenting the backend database, and clearing the space quota alarm can the cluster resume normal operation.
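While the alarm is raised the cluster stays readable: reads and deletes continue to work and only writes are refused. A minimal Go sketch of what a client observes in this maintenance mode, using the `clientv3` package; the endpoint address and key names are illustrative, and the import paths assume the `coreos/etcd` layout used at the time:
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Reads still succeed while the NOSPACE alarm is raised.
	if _, err := cli.Get(ctx, "some-key"); err != nil {
		log.Fatal(err)
	}
	// Deletes are also accepted, so space can be reclaimed.
	if _, err := cli.Delete(ctx, "old-key"); err != nil {
		log.Fatal(err)
	}
	// Puts are rejected until the alarm is disarmed.
	if _, err := cli.Put(ctx, "new-key", "v"); err == rpctypes.ErrNoSpace {
		fmt.Println("writes rejected: database space exceeded")
	}
}
```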
By default, `etcd` sets a conservative space quota suitable for most applications, but it may be configured on the command line, in bytes:
```sh
# set a very small 16MB quota
$ etcd --quota-backend-bytes=16777216
$ etcd --quota-backend-bytes=$((16*1024*1024))
```
The space quota alarm can be triggered with a loop that fills the keyspace:
```sh
# fill keyspace
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break; done
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | ETCDCTL_API=3 etcdctl put key || break; done
...
Error: rpc error: code = 8 desc = etcdserver: mvcc: database space exceeded
# confirm quota space is exceeded
$ etcdctl --write-out=table endpoint status
$ ETCDCTL_API=3 etcdctl --write-out=table endpoint status
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| 127.0.0.1:2379 | bf9071f4639c75cc | 2.3.0+git | 18 MB | true | 2 | 3332 |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
# confirm alarm is raised
$ etcdctl alarm list
$ ETCDCTL_API=3 etcdctl alarm list
memberID:13803658152347727308 alarm:NOSPACE
```
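The same checks can be scripted; a rough Go equivalent of the `endpoint status` and `alarm list` commands above, assuming a single local endpoint (a sketch against the `clientv3` maintenance API, not a drop-in tool):
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	endpoint := "127.0.0.1:2379"
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{endpoint},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Roughly `etcdctl endpoint status`: report the backend database size.
	status, err := cli.Status(ctx, endpoint)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("endpoint %s db size: %d bytes\n", endpoint, status.DbSize)

	// Roughly `etcdctl alarm list`: report any raised alarms.
	alarms, err := cli.AlarmList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range alarms.Alarms {
		fmt.Printf("memberID:%d alarm:%v\n", a.MemberID, a.Alarm)
	}
}
```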
Removing excessive keyspace data will put the cluster back within the quota limits so the alarm can be disarmed:
Removing excessive keyspace data and defragmenting the backend database will put the cluster back within the quota limits:
```sh
# get current revision
$ etcdctl --endpoints=:2379 endpoint status
[{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":8925027824743593106,"member_id":13803658152347727308,"revision":1516,"raft_term":2},"version":"2.3.0+git","dbSize":17973248,"leader":13803658152347727308,"raftIndex":6359,"raftTerm":2}}]
$ rev=$(ETCDCTL_API=3 etcdctl --endpoints=:2379 endpoint status --write-out="json" | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*')
# compact away all old revisions
$ etcdctl compact 1516
$ ETCDCTL_API=3 etcdctl compact $rev
compacted revision 1516
# defragment away excessive space
$ etcdctl defrag
$ ETCDCTL_API=3 etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
# disarm alarm
$ etcdctl alarm disarm
$ ETCDCTL_API=3 etcdctl alarm disarm
memberID:13803658152347727308 alarm:NOSPACE
# test puts are allowed again
$ etcdctl put newkey 123
$ ETCDCTL_API=3 etcdctl put newkey 123
OK
```
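For operators who automate this recovery, the sequence above (find the current revision, compact, defragment, disarm) maps onto the `clientv3` API. A minimal sketch, assuming a single local endpoint; exact signatures and return values may differ slightly between etcd releases:
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	endpoint := "127.0.0.1:2379"
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{endpoint},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Get the current revision from the endpoint status header.
	status, err := cli.Status(ctx, endpoint)
	if err != nil {
		log.Fatal(err)
	}
	rev := status.Header.Revision

	// Compact away all old revisions.
	if _, err := cli.Compact(ctx, rev); err != nil {
		log.Fatal(err)
	}

	// Defragment the backend database to reclaim the freed space.
	if _, err := cli.Defragment(ctx, endpoint); err != nil {
		log.Fatal(err)
	}

	// Disarm any NOSPACE alarms so writes are accepted again.
	alarms, err := cli.AlarmList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range alarms.Alarms {
		m := &clientv3.AlarmMember{MemberID: a.MemberID, Alarm: a.Alarm}
		if _, err := cli.AlarmDisarm(ctx, m); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("quota recovery complete")
}
```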

@@ -17,6 +17,7 @@ package integration
import (
"bytes"
"math/rand"
"os"
"reflect"
"strings"
"testing"
@@ -35,8 +36,8 @@ func TestKVPutError(t *testing.T) {
defer testutil.AfterTest(t)
var (
maxReqBytes = 1.5 * 1024 * 1024
quota = int64(maxReqBytes * 1.2)
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
quota = int64(int(maxReqBytes) + 8*os.Getpagesize())
)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
defer clus.Terminate(t)
@@ -49,7 +50,7 @@ func TestKVPutError(t *testing.T) {
t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
}
_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) // 1.5MB
_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
if err != rpctypes.ErrRequestTooLarge {
t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
}
@@ -59,7 +60,7 @@ func TestKVPutError(t *testing.T) {
t.Fatal(err)
}
time.Sleep(500 * time.Millisecond) // give enough time for commit
time.Sleep(1 * time.Second) // give enough time for commit
_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
if err != rpctypes.ErrNoSpace { // over quota
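The updated quota above is sized relative to the OS page size rather than a fixed 20% margin, since boltdb sizes its backend in units of the OS page and ARM64 kernels often run with pages larger than 4 KiB. A standalone sketch of the arithmetic; the 4 KiB and 64 KiB figures below are typical values, not guarantees:
```go
package main

import (
	"fmt"
	"os"
)

func main() {
	maxReqBytes := 1.5 * 1024 * 1024 // hard coded max in v3_server.go

	// Old formula: a fixed 20% margin, independent of the page size.
	oldQuota := int64(maxReqBytes * 1.2)

	// New formula: eight pages of headroom above the largest request, so the
	// quota grows with the page size (commonly 4 KiB on x86-64, often 64 KiB
	// on ARM64 kernels).
	newQuota := int64(int(maxReqBytes) + 8*os.Getpagesize())

	fmt.Printf("page size: %d bytes\n", os.Getpagesize())
	fmt.Printf("old quota: %d bytes, new quota: %d bytes\n", oldQuota, newQuota)
}
```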

@@ -15,44 +15,62 @@
package e2e
import (
"os"
"strings"
"testing"
)
func TestCtlV3Alarm(t *testing.T) { testCtl(t, alarmTest, withQuota(64*1024)) }
func TestCtlV3Alarm(t *testing.T) {
// The boltdb minimum working set is six pages.
testCtl(t, alarmTest, withQuota(int64(13*os.Getpagesize())))
}
func alarmTest(cx ctlCtx) {
// test small put still works
smallbuf := strings.Repeat("a", int(cx.quotaBackendBytes/100))
if err := ctlV3Put(cx, "abc", smallbuf, ""); err != nil {
smallbuf := strings.Repeat("a", 64)
if err := ctlV3Put(cx, "1st_test", smallbuf, ""); err != nil {
cx.t.Fatal(err)
}
// test big put (to be rejected, and trigger quota alarm)
bigbuf := strings.Repeat("a", int(cx.quotaBackendBytes))
if err := ctlV3Put(cx, "abc", bigbuf, ""); err != nil {
if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") {
cx.t.Fatal(err)
// write some chunks to fill up the database
buf := strings.Repeat("b", int(os.Getpagesize()))
var rev int64
for ; ; rev++ {
if err := ctlV3Put(cx, "2nd_test", buf, ""); err != nil {
if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") {
cx.t.Fatal(err)
}
break
}
}
// quota alarm should now be on
if err := ctlV3Alarm(cx, "list", "alarm:NOSPACE"); err != nil {
cx.t.Fatal(err)
}
// alarm is on rejecting Puts and Txns
if err := ctlV3Put(cx, "def", smallbuf, ""); err != nil {
// check that Put is rejected when alarm is on
if err := ctlV3Put(cx, "3rd_test", smallbuf, ""); err != nil {
if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") {
cx.t.Fatal(err)
}
}
// make some space
if err := ctlV3Compact(cx, rev, true); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3Defrag(cx); err != nil {
cx.t.Fatal(err)
}
// turn off alarm
if err := ctlV3Alarm(cx, "disarm", "alarm:NOSPACE"); err != nil {
cx.t.Fatal(err)
}
// put one more key below quota
if err := ctlV3Put(cx, "ghi", smallbuf, ""); err != nil {
if err := ctlV3Put(cx, "4th_test", smallbuf, ""); err != nil {
cx.t.Fatal(err)
}
}
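Expressing the quota in pages matters here: the previous fixed `64*1024` quota amounts to sixteen 4 KiB pages but only a single 64 KiB page, which is below the six-page boltdb minimum working set noted in the comment above. A small standalone sketch of that arithmetic (the page sizes are typical values, used for illustration):
```go
package main

import "fmt"

func main() {
	const (
		oldQuota     = 64 * 1024 // previous hard-coded quota in bytes
		boltMinPages = 6         // minimum boltdb working set, per the comment above
	)

	for _, pageSize := range []int{4 * 1024, 64 * 1024} {
		oldPages := oldQuota / pageSize
		newQuota := 13 * pageSize
		fmt.Printf("page size %5d: old quota = %2d page(s), need >= %d; new quota = %6d bytes\n",
			pageSize, oldPages, boltMinPages, newQuota)
	}
}
```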

@@ -17,6 +17,7 @@ package integration
import (
"fmt"
"math/rand"
"os"
"reflect"
"testing"
"time"
@@ -583,10 +584,12 @@ func TestV3Hash(t *testing.T) {
// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer
func TestV3StorageQuotaAPI(t *testing.T) {
defer testutil.AfterTest(t)
quotasize := int64(16 * os.Getpagesize())
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus.Members[0].QuotaBackendBytes = 64 * 1024
// Set a quota on one node
clus.Members[0].QuotaBackendBytes = quotasize
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
@@ -602,7 +605,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
}
// test big put
bigbuf := make([]byte, 64*1024)
bigbuf := make([]byte, quotasize)
_, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) {
t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace)
@@ -628,14 +631,15 @@ func TestV3StorageQuotaAPI(t *testing.T) {
// TestV3StorageQuotaApply tests the V3 server respects quotas during apply
func TestV3StorageQuotaApply(t *testing.T) {
testutil.AfterTest(t)
quotasize := int64(16 * os.Getpagesize())
clus := NewClusterV3(t, &ClusterConfig{Size: 2})
defer clus.Terminate(t)
kvc0 := toGRPC(clus.Client(0)).KV
kvc1 := toGRPC(clus.Client(1)).KV
// force a node to have a different quota
clus.Members[0].QuotaBackendBytes = 64 * 1024
// Set a quota on one node
clus.Members[0].QuotaBackendBytes = quotasize
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
@@ -650,7 +654,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
}
// test big put
bigbuf := make([]byte, 64*1024)
bigbuf := make([]byte, quotasize)
_, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf})
if err != nil {
t.Fatal(err)
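As elsewhere in this change, the quota for these tests scales with the page size, and the oversized value is made as large as the whole quota so the put stays over quota on any architecture. A brief standalone sketch of the values involved (illustrative only):
```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Sixteen OS pages instead of a hard-coded 64*1024, so the backend keeps a
	// workable amount of space even on large-page (e.g. ARM64) systems.
	quotasize := int64(16 * os.Getpagesize())

	// A value as large as the entire quota is guaranteed to push the backend
	// over quota regardless of the page size.
	bigbuf := make([]byte, quotasize)

	fmt.Printf("page size %d -> quota %d bytes, oversized put of %d bytes\n",
		os.Getpagesize(), quotasize, len(bigbuf))
}
```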