From 2fd92f035624024131bc06b5348826ef97d07ee0 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 09:17:35 +0100 Subject: [PATCH 01/53] Tell Git to ignore .pem private key files from AWS --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index d8010917..16bf3f03 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,5 @@ target/ # pyenv .python-version +# Private key files from AWS +*.pem From 68e6d7f615da9f4bf2dcc80d11c03c9b670508ca Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 14:23:08 +0100 Subject: [PATCH 02/53] Copied code from ascribe/bigchain-depoloyment repo --- bigchain-deployment/Readme.md | 19 ++ bigchain-deployment/bigchain/bigchaindb.conf | 107 ++++++++++ .../bigchain/bigchaindb.conf.template | 105 ++++++++++ bigchain-deployment/create_hostlist.py | 68 ++++++ bigchain-deployment/fab_prepare_chain.py | 28 +++ bigchain-deployment/fabfile.py | 194 ++++++++++++++++++ bigchain-deployment/get_elastic_ips.py | 64 ++++++ bigchain-deployment/get_instance_status.py | 99 +++++++++ bigchain-deployment/hostlist.py | 1 + bigchain-deployment/run_and_tag.py | 49 +++++ bigchain-deployment/startup.sh | 97 +++++++++ 11 files changed, 831 insertions(+) create mode 100644 bigchain-deployment/Readme.md create mode 100644 bigchain-deployment/bigchain/bigchaindb.conf create mode 100644 bigchain-deployment/bigchain/bigchaindb.conf.template create mode 100644 bigchain-deployment/create_hostlist.py create mode 100644 bigchain-deployment/fab_prepare_chain.py create mode 100644 bigchain-deployment/fabfile.py create mode 100644 bigchain-deployment/get_elastic_ips.py create mode 100644 bigchain-deployment/get_instance_status.py create mode 100644 bigchain-deployment/hostlist.py create mode 100644 bigchain-deployment/run_and_tag.py create mode 100755 bigchain-deployment/startup.sh diff --git a/bigchain-deployment/Readme.md b/bigchain-deployment/Readme.md new file mode 100644 index 00000000..359b0898 --- /dev/null +++ 
b/bigchain-deployment/Readme.md @@ -0,0 +1,19 @@ +## Create and configure the storage backend in Amazon's Cloud + +#### Getting started +- Check out bigchaindb and copy bigchain-deployment to bigchaindb repository + +#### Prerequisites + - Valid AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are needed; both are exported as variables to the shell + - awscli, + - boto + - fabric w/ fabtools + +#### Cluster Installation + - Go to the DEPLOY-directory and run './startup.sh' with two parameters (tag and number of nodes)...that's it! + e.g.: ./startup.sh bro 7 to install a cluster tagged as bro with seven nodes. + +#### If an error occurs... +There are some issues during the rollout on Amazon (presumably also in other cloud/virtual environments): if you tested with a high sequence you might run into: + - NetworkError: Host key for ec2-xx-xx-xx-xx.eu-central-1.compute.amazonaws.com did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack. +If so, just clean up your known_hosts file and start again. diff --git a/bigchain-deployment/bigchain/bigchaindb.conf b/bigchain-deployment/bigchain/bigchaindb.conf new file mode 100644 index 00000000..3d64abb5 --- /dev/null +++ b/bigchain-deployment/bigchain/bigchaindb.conf @@ -0,0 +1,107 @@ +# +# RethinkDB instance configuration sample +# +# - Give this file the extension .conf and put it in /etc/rethinkdb/instances.d in order to enable it. +# - See http://www.rethinkdb.com/docs/guides/startup/ for the complete documentation +# - Uncomment an option to change its value. 
+# + +############################### +## RethinkDB configuration +############################### + +### Process options + +## User and group used to run rethinkdb +## Command line default: do not change user or group +## Init script default: rethinkdb user and group +# runuser=rethinkdb +# rungroup=rethinkdb + +## Stash the pid in this file when the process is running +## Note for systemd users: Systemd uses its own internal mechanism. Do not set this parameter. +## Command line default: none +## Init script default: /var/run/rethinkdb//pid_file (where is the name of this config file without the extension) +# pid-file=/var/run/rethinkdb/rethinkdb.pid + +### File path options + +## Directory to store data and metadata +## Command line default: ./rethinkdb_data +## Init script default: /var/lib/rethinkdb// (where is the name of this file without the extension) +directory=/data + +## Log file options +## Default: /log_file +#log-file=/var/log/rethinkdb + +### Network options + +## Address of local interfaces to listen on when accepting connections +## May be 'all' or an IP address, loopback addresses are enabled by default +## Default: all local addresses +# bind=127.0.0.1 +bind=all + +## Address that other rethinkdb instances will use to connect to this server. +## It can be specified multiple times +# canonical-address= + +## The port for rethinkdb protocol for client drivers +## Default: 28015 + port-offset +# driver-port=28015 + +## The port for receiving connections from other nodes +## Default: 29015 + port-offset +# cluster-port=29015 + +## The host:port of a node that rethinkdb will connect to +## This option can be specified multiple times. +## Default: none +# join=example.com:29015 + +## All ports used locally will have this value added +## Default: 0 +# port-offset=0 + +## r.http(...) 
queries will use the given server as a web proxy +## Default: no proxy +# reql-http-proxy=socks5://example.com:1080 + +### Web options + +## Port for the http admin console +## Default: 8080 + port-offset +# http-port=8080 + +## Disable web administration console +# no-http-admin + +### CPU options + +## The number of cores to use +## Default: total number of cores of the CPU +# cores=2 + +### Memory options + +## Size of the cache in MB +## Default: Half of the available RAM on startup +# cache-size=1024 + +### Disk + +## How many simultaneous I/O operations can happen at the same time +# io-threads=64 +#io-threads=128 + +## Enable direct I/O +direct-io + +### Meta + +## The name for this server (as will appear in the metadata). +## If not specified, it will be randomly chosen from a short list of names. +# server-name=server1 +## The host:port of a node that rethinkdb will connect to +join=ec2-52-58-94-210.eu-central-1.compute.amazonaws.com:29015 diff --git a/bigchain-deployment/bigchain/bigchaindb.conf.template b/bigchain-deployment/bigchain/bigchaindb.conf.template new file mode 100644 index 00000000..c1541979 --- /dev/null +++ b/bigchain-deployment/bigchain/bigchaindb.conf.template @@ -0,0 +1,105 @@ +# +# RethinkDB instance configuration sample +# +# - Give this file the extension .conf and put it in /etc/rethinkdb/instances.d in order to enable it. +# - See http://www.rethinkdb.com/docs/guides/startup/ for the complete documentation +# - Uncomment an option to change its value. +# + +############################### +## RethinkDB configuration +############################### + +### Process options + +## User and group used to run rethinkdb +## Command line default: do not change user or group +## Init script default: rethinkdb user and group +# runuser=rethinkdb +# rungroup=rethinkdb + +## Stash the pid in this file when the process is running +## Note for systemd users: Systemd uses its own internal mechanism. Do not set this parameter. 
+## Command line default: none +## Init script default: /var/run/rethinkdb//pid_file (where is the name of this config file without the extension) +# pid-file=/var/run/rethinkdb/rethinkdb.pid + +### File path options + +## Directory to store data and metadata +## Command line default: ./rethinkdb_data +## Init script default: /var/lib/rethinkdb// (where is the name of this file without the extension) +directory=/data + +## Log file options +## Default: /log_file +#log-file=/var/log/rethinkdb + +### Network options + +## Address of local interfaces to listen on when accepting connections +## May be 'all' or an IP address, loopback addresses are enabled by default +## Default: all local addresses +# bind=127.0.0.1 +bind=all + +## Address that other rethinkdb instances will use to connect to this server. +## It can be specified multiple times +# canonical-address= + +## The port for rethinkdb protocol for client drivers +## Default: 28015 + port-offset +# driver-port=28015 + +## The port for receiving connections from other nodes +## Default: 29015 + port-offset +# cluster-port=29015 + +## The host:port of a node that rethinkdb will connect to +## This option can be specified multiple times. +## Default: none +# join=example.com:29015 + +## All ports used locally will have this value added +## Default: 0 +# port-offset=0 + +## r.http(...) 
queries will use the given server as a web proxy +## Default: no proxy +# reql-http-proxy=socks5://example.com:1080 + +### Web options + +## Port for the http admin console +## Default: 8080 + port-offset +# http-port=8080 + +## Disable web administration console +# no-http-admin + +### CPU options + +## The number of cores to use +## Default: total number of cores of the CPU +# cores=2 + +### Memory options + +## Size of the cache in MB +## Default: Half of the available RAM on startup +# cache-size=1024 + +### Disk + +## How many simultaneous I/O operations can happen at the same time +# io-threads=64 +#io-threads=128 + +## Enable direct I/O +direct-io + +### Meta + +## The name for this server (as will appear in the metadata). +## If not specified, it will be randomly chosen from a short list of names. +# server-name=server1 diff --git a/bigchain-deployment/create_hostlist.py b/bigchain-deployment/create_hostlist.py new file mode 100644 index 00000000..5793d06f --- /dev/null +++ b/bigchain-deployment/create_hostlist.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import json +import argparse +import boto.ec2 +import os + +AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] +AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] + +parser = argparse.ArgumentParser() +parser.add_argument("--tag", help="tag instances in aws") +args = parser.parse_args() + +conn = boto.ec2.connect_to_region("eu-central-1", + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) + +PUBLIC_LIST = [] +PRIVATE_LIST = [] +INSTANCE_IDS = [] + +if args.tag: + tag = args.tag +else: + # reading credentials from config for remote connection + print('usage: python3 create_hostlist.py --tag ') + print('reason: tag missing!!!') + exit(1) + + +def prepare_list(tag): + reservations = conn.get_all_instances(filters={"tag:Name" : tag}) + instances = [i for r in reservations for i in r.instances] + for i in instances: + inst = i.__dict__ + 
publdns = inst.get('public_dns_name') + privdns = inst.get('private_dns_name') + inst_id = inst.get('id') + PUBLIC_LIST.append(publdns) + PRIVATE_LIST.append(privdns) + INSTANCE_IDS.append(inst_id) + return PUBLIC_LIST, PRIVATE_LIST, INSTANCE_IDS + + +# get lists from amazon +publist, privlist, instlist = prepare_list(tag) + +# create shellscript for adding remote keys to known_hosts +localFile = open('add2known_hosts.sh', 'w') +localFile.write('#! /bin/bash\n') +for entry in range(0,len(publist)): + localFile.write('ssh-keyscan ' + publist[entry] + ' >> ~/.ssh/known_hosts\n') +localFile.close() + +# hostliste und id-liste aus json erzeugen +hosts = publist +localFile = open('add2dbconf', 'w') +before = 'join=' +after = ':29015' +localFile.write('## The host:port of a node that rethinkdb will connect to\n') +for entry in range(0,int(len(hosts)/2)): + localFile.write(before + hosts[entry] + after + '\n') + + +# printout hostlist +print ("hosts_dev = ", publist) diff --git a/bigchain-deployment/fab_prepare_chain.py b/bigchain-deployment/fab_prepare_chain.py new file mode 100644 index 00000000..98aedd7d --- /dev/null +++ b/bigchain-deployment/fab_prepare_chain.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" Generating genesis block +""" + +from __future__ import with_statement + +from fabric import colors as c +from fabric.api import * +from fabric.api import local, puts, settings, hide, abort, lcd, prefix +from fabric.api import run, sudo, cd, get, local, lcd, env, hide +from fabric.api import task, parallel +from fabric.contrib import files +from fabric.contrib.files import append, exists +from fabric.contrib.console import confirm +from fabric.contrib.project import rsync_project +from fabric.operations import run, put +from fabric.context_managers import settings +from fabric.decorators import roles +from fabtools import * + +env.user = 'ubuntu' +env.key_filename = 'pem/bigchain.pem' + +@task +def init_bigchaindb(): + run('bigchaindb -y 
start &', pty = False) diff --git a/bigchain-deployment/fabfile.py b/bigchain-deployment/fabfile.py new file mode 100644 index 00000000..47eb0a81 --- /dev/null +++ b/bigchain-deployment/fabfile.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" Preparing, installing and configuring + bigchain and the storage backend +""" + +from __future__ import with_statement + +import requests +from time import * +import os +from datetime import datetime, timedelta +import json +from pprint import pprint + +from fabric import colors as c +from fabric.api import * +from fabric.api import local, puts, settings, hide, abort, lcd, prefix +from fabric.api import run, sudo, cd, get, local, lcd, env, hide +from fabric.api import task, parallel +from fabric.contrib import files +from fabric.contrib.files import append, exists +from fabric.contrib.console import confirm +from fabric.contrib.project import rsync_project +from fabric.operations import run, put +from fabric.context_managers import settings +from fabric.decorators import roles +from fabtools import * + +from hostlist import hosts_dev + +env.hosts = hosts_dev +env.roledefs = { + "role1": hosts_dev, + "role2": [hosts_dev[0]], + } +env.roles = ["role1"] +env.user = 'ubuntu' +env.key_filename = 'pem/bigchain.pem' + + +################################################################################ + +# base softwarestack rollout +@task +@parallel +def install_base_software(): + sudo('apt-get -y update') + sudo('dpkg --configure -a') + sudo('apt-get -y -f install') + sudo('apt-get -y install build-essential wget bzip2 ca-certificates \ + libglib2.0-0 libxext6 libsm6 libxrender1 libssl-dev \ + git gcc g++ python-dev libboost-python-dev libffi-dev \ + software-properties-common python-software-properties \ + python3-pip ipython3 sysstat s3cmd') + + +# RethinkDB +@task +@parallel +def install_rethinkdb(): + """Installation of RethinkDB""" + with settings(warn_only=True): + # preparing filesystem + 
sudo("mkdir -p /data") + # Locally mounted storage (m3.2xlarge, aber auch c3.xxx) + try: + sudo("umount /mnt") + sudo("mkfs -t ext4 /dev/xvdb") + sudo("mount /dev/xvdb /data") + except: + pass + + # persist settings to fstab + sudo("rm -rf /etc/fstab") + sudo("echo 'LABEL=cloudimg-rootfs / ext4 defaults,discard 0 0' >> /etc/fstab") + sudo("echo '/dev/xvdb /data ext4 defaults,noatime 0 0' >> /etc/fstab") + # activate deadline scheduler + with settings(sudo_user='root'): + sudo("echo deadline > /sys/block/xvdb/queue/scheduler") + # install rethinkdb + sudo("echo 'deb http://download.rethinkdb.com/apt trusty main' | sudo tee /etc/apt/sources.list.d/rethinkdb.list") + sudo("wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -") + sudo("apt-get update") + sudo("apt-get -y install rethinkdb") + # change fs to user + sudo('chown -R rethinkdb:rethinkdb /data') + # copy config file to target system + put('bigchain/bigchaindb.conf', + '/etc/rethinkdb/instances.d/instance1.conf', mode=0600, use_sudo=True) + # initialize data-dir + sudo('rm -rf /data/*') + # finally restart instance + sudo('/etc/init.d/rethinkdb restart') + + +# bigchain deployment +@task +@parallel +def install_bigchain(): + sudo('python3 -m pip install bigchaindb') + + +# startup all nodes of bigchaindb in cluster +@task +@parallel +def start_bigchain_nodes(): + sudo('screen -d -m bigchaindb -y start &', pty = False) + + +@task +def install_newrelic(): + with settings(warn_only=True): + sudo('echo deb http://apt.newrelic.com/debian/ newrelic non-free >> /etc/apt/sources.list') + #sudo('apt-key adv --keyserver hkp://subkeys.pgp.net --recv-keys 548C16BF') + sudo('apt-get update') + sudo('apt-get -y --force-yes install newrelic-sysmond') + sudo('nrsysmond-config --set license_key=c88af00c813983f8ee12e9b455aa13fde1cddaa8') + sudo('/etc/init.d/newrelic-sysmond restart') + + +############################### +# Security / FirewallStuff next +############################### + +@task +def 
harden_sshd(): + """Security harden sshd.""" + + # Disable password authentication + sed('/etc/ssh/sshd_config', + '#PasswordAuthentication yes', + 'PasswordAuthentication no', + use_sudo=True) + # Deny root login + sed('/etc/ssh/sshd_config', + 'PermitRootLogin yes', + 'PermitRootLogin no', + use_sudo=True) + + +@task +def disable_root_login(): + """Disable `root` login for even more security. Access to `root` account + is now possible by first connecting with your dedicated maintenance + account and then running ``sudo su -``.""" + sudo('passwd --lock root') + + +@task +def set_fw(): + # snmp + sudo('iptables -A INPUT -p tcp --dport 161 -j ACCEPT') + sudo('iptables -A INPUT -p udp --dport 161 -j ACCEPT') + # dns + sudo('iptables -A OUTPUT -p udp -o eth0 --dport 53 -j ACCEPT') + sudo('iptables -A INPUT -p udp -i eth0 --sport 53 -j ACCEPT') + # rethinkdb + sudo('iptables -A INPUT -p tcp --dport 28015 -j ACCEPT') + sudo('iptables -A INPUT -p udp --dport 28015 -j ACCEPT') + sudo('iptables -A INPUT -p tcp --dport 29015 -j ACCEPT') + sudo('iptables -A INPUT -p udp --dport 29015 -j ACCEPT') + sudo('iptables -A INPUT -p tcp --dport 8080 -j ACCEPT') + sudo('iptables -A INPUT -i eth0 -p tcp --dport 8080 -j DROP') + sudo('iptables -I INPUT -i eth0 -s 127.0.0.1 -p tcp --dport 8080 -j ACCEPT') + # save rules + sudo('iptables-save > /etc/sysconfig/iptables') + + +######################################################### +# some helper-functions to handle bad behavior of cluster +######################################################### + +# rebuild indexes +@task +@parallel +def rebuild_indexes(): + run('rethinkdb index-rebuild -n 2') + + +@task +def stopdb(): + sudo('service rethinkdb stop') + + +@task +def startdb(): + sudo('service rethinkdb start') + + +@task +def restartdb(): + sudo('/etc/init.d/rethinkdb restart') diff --git a/bigchain-deployment/get_elastic_ips.py b/bigchain-deployment/get_elastic_ips.py new file mode 100644 index 00000000..5d308d3c --- /dev/null +++ 
b/bigchain-deployment/get_elastic_ips.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# create Elastic IPs and assign them to instances if needed +import json +import os +import boto.ec2 +import argparse +import time + +AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] +AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] + +parser = argparse.ArgumentParser() +parser.add_argument("--tag", help="tag instances in aws") +args = parser.parse_args() + +if args.tag: + tag = args.tag +else: + # reading credentials from config for remote connection + print('usage: python3 get_elastic_ips.py --tag ') + print('reason: tag missing!!!') + exit(1) + +conn = boto.ec2.connect_to_region("eu-central-1", + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) + +INSTANCE_IDS = [] + + +def prepare_list(tag): + reservations = conn.get_all_instances(filters={"tag:Name" : tag}) + instances = [i for r in reservations for i in r.instances] + for i in instances: + inst = i.__dict__ + #print (inst) + #break + inst_id = inst.get('id') + + INSTANCE_IDS.append(inst_id) + return INSTANCE_IDS + + +def get_new_pubDNS(): + eip = conn.allocate_address() + return eip + +if __name__ == "__main__": + # hostlist.tmp (JSON) erzeugen + instlist = prepare_list(tag) + + for entry in range(0,len(instlist)): + + instance_id = instlist[entry] + print(instance_id) + newpubDNS = get_new_pubDNS() + inID = str(newpubDNS).split(':')[1] + print(inID) + conn.associate_address(instance_id, public_ip=inID) + + # make sure all addresse are assigned... 
+ time.sleep(30) diff --git a/bigchain-deployment/get_instance_status.py b/bigchain-deployment/get_instance_status.py new file mode 100644 index 00000000..175c600d --- /dev/null +++ b/bigchain-deployment/get_instance_status.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import json +import os +import boto.ec2 +import time +import argparse + + +AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] +AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] + +parser = argparse.ArgumentParser() +parser.add_argument("--tag", help="tag instances in aws") +args = parser.parse_args() + +if args.tag: + tag = args.tag +else: + # reading credentials from config for remote connection + print('usage: python3 get_instance_status.py --tag ') + print('reason: tag missing!!!') + exit(1) + +conn = boto.ec2.connect_to_region("eu-central-1", + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) + +PASSING = [] +RUNNING = [] + + +# get list of instance ids from amazon +def list_of_ids(tag): + # TODO: CHANGE TO PROPER DOCSTRING + # Returns a list of ids of all instances with the given tag + reservations = conn.get_all_instances(filters={"tag:Name": tag}) + # There are several reservations + # and each reservation can have several instances + id_list = [] + for reservation in reservations: + for instance in reservation.instances: + if instance.id is not None: + id_list.append(instance.id) + return id_list + + +# Andreas' old code: +""" +INSTANCE_IDS = [] + +def prepare_list(tag): + reservations = conn.get_all_instances(filters={"tag:Name" : tag}) + instances = [i for r in reservations for i in r.instances] + for i in instances: + inst = i.__dict__ + inst_id = inst.get('id') + INSTANCE_IDS.append(inst_id) + return INSTANCE_IDS +""" + + +# get statuses from amazon +def create_run_pass_list(tag): + # instlist_old = prepare_list(tag) + # print("instlist_old = {}".format(instlist_old)) + instlist_new = list_of_ids(tag) + 
print("instlist_new = {}".format(instlist_new)) + + instlist = instlist_new + + for entry in range(0, len(instlist)): # 0, 1, ..., [len(instlist) - 1] + instances = conn.get_only_instances(instance_ids=instlist[entry]) + status = conn.get_all_instance_status(instance_ids=instlist[entry]) + for instance in instances: + reachability = status[0].system_status.details["reachability"] + PASSING.append(reachability) + return instlist, PASSING, RUNNING + + +if __name__ == "__main__": + # get lists from amazon + try: + instlist, passlist, runlist = create_run_pass_list(tag) + print("instlist = {}".format(instlist)) + print("passlist = {}".format(passlist)) + print("runlist = {}".format(runlist)) + except IndexError: + print("Searching for matching cluster-tag...") + exit(1) + + for entry in range(0,len(instlist)): + if "passed" in passlist and len(set(passlist)) == 1: + print("up and running") + exit(0) + + # exit with error code for continous check if nothing found + exit(1) diff --git a/bigchain-deployment/hostlist.py b/bigchain-deployment/hostlist.py new file mode 100644 index 00000000..b32f8ca7 --- /dev/null +++ b/bigchain-deployment/hostlist.py @@ -0,0 +1 @@ +hosts_dev = ['ec2-52-58-94-210.eu-central-1.compute.amazonaws.com', 'ec2-52-29-236-2.eu-central-1.compute.amazonaws.com'] diff --git a/bigchain-deployment/run_and_tag.py b/bigchain-deployment/run_and_tag.py new file mode 100644 index 00000000..aec080aa --- /dev/null +++ b/bigchain-deployment/run_and_tag.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import boto.ec2 +import time +import argparse +import os + +AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] +AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] + +parser = argparse.ArgumentParser() +parser.add_argument("--tag", help="tag instances in aws") +parser.add_argument("--nodes", help="number of nodes in the cluster") +args = parser.parse_args() + +if args.tag: + tag = args.tag +else: + # reading credentials from config 
for remote connection + print('usage: python3 run_and_tag.py --tag --nodes ') + print('reason: tag missing!!!') + exit(1) + +if args.nodes: + nodes = int(args.nodes) +else: + print('usage: python3 run_and_tag.py --tag --nodes ') + print('reason: nodes missing!!!') + exit(1) + +conn = boto.ec2.connect_to_region("eu-central-1", + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) + +for _ in range(nodes): # 0, 1, ..., (nodes-1) = nodes items + reservation = conn.run_instances( + 'ami-accff2b1', # ubuntu-image + #'ami-596b7235', # ubuntu w/ iops storage + key_name='bigchain', + # IMPORTANT!!!! - here you change the machine type for the cluster + instance_type='m3.2xlarge', + #instance_type='c3.8xlarge', + #instance_type='c4.8xlarge', + security_groups=['bigchain']) + + for instance in reservation.instances: + time.sleep(5) + instance.add_tag('Name', tag) diff --git a/bigchain-deployment/startup.sh b/bigchain-deployment/startup.sh new file mode 100755 index 00000000..2f946e01 --- /dev/null +++ b/bigchain-deployment/startup.sh @@ -0,0 +1,97 @@ +#! /bin/bash + +function printErr() + { + echo "usage: ./startup.sh " + echo "No argument $1 supplied" + } + +if [ -z "$1" ] + then + printErr "" + exit 1 +fi + +if [ -z "$2" ] + then + printErr "" + exit 1 +fi + +TAG=$1 +NODES=$2 +AWS=`which aws` +FAB=`which fab` +DEPLOYDIR=`pwd` +BIGCHAINDIR=`dirname $DEPLOYDIR` +export BIGCHAINDIR + +# check if awscli is installed +if [ ! -f "$AWS" ] + then + echo "awscli is not installed!!!" + exit 1 +fi + +# check if python-fabric is installed +if [ ! -f "$FAB" ] + then + echo "python-fabric is not installed!!!" + exit 1 +fi + +# checking pem-file and changing access rights +if [ ! -f "pem/bigchain.pem" ] + then + echo "Valid key is missing!!!" 
+ exit 1 +fi +# 0400 for pem/bigchain.pem +chmod 0400 pem/bigchain.pem + +# starting and tagging instances +python3 run_and_tag.py --tag $TAG --nodes $NODES +# let's wait a minute to get the nodes ready and in status initializing +#sleep 60 + +# checking if instances are up and running (every 5 secs.) +RET=1 +until [ ${RET} -eq 0 ]; do + python3 get_instance_status.py --tag $TAG + RET=$? + sleep 5 +done + +# in case of elastic ips... +python3 get_elastic_ips.py --tag $TAG + +# everything prepared. now wait until instances up and running! +# generate hostlist.py and add_keys.sh +python3 create_hostlist.py --tag $TAG > hostlist.py +# make add_keys executable and execute +chmod +x add2known_hosts.sh +./add2known_hosts.sh + +# resetting the rethinkdb initfile and adding the nodes to join... +cp bigchain/bigchaindb.conf.template bigchain/bigchaindb.conf +cat add2dbconf >> bigchain/bigchaindb.conf + +# rollout base packages for installation of storage and bigchain +fab install_base_software + +# rollout storagebackend (rethinkdb) +fab install_rethinkdb + +# rollout bigchain-repository +fab install_bigchain + +# generate genesisblock +HORST=`tail -1 bigchain/bigchaindb.conf|cut -d: -f1|cut -d= -f2` +fab -H $HORST -f fab_prepare_chain.py init_bigchaindb +# initiate sharding +fab start_bigchain_nodes + +# now cleanup! +rm add2known_hosts.sh add2dbconf + +# DONE! 
From a0bb61d1c724bf723e592e56a5d98ac53e9c04b5 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 14:27:31 +0100 Subject: [PATCH 03/53] Rename bigchain-deployment dir to deploy-cluster-aws --- {bigchain-deployment => deploy-cluster-aws}/Readme.md | 0 .../bigchain/bigchaindb.conf | 0 .../bigchain/bigchaindb.conf.template | 0 {bigchain-deployment => deploy-cluster-aws}/create_hostlist.py | 0 {bigchain-deployment => deploy-cluster-aws}/fab_prepare_chain.py | 0 {bigchain-deployment => deploy-cluster-aws}/fabfile.py | 0 {bigchain-deployment => deploy-cluster-aws}/get_elastic_ips.py | 0 .../get_instance_status.py | 0 {bigchain-deployment => deploy-cluster-aws}/hostlist.py | 0 {bigchain-deployment => deploy-cluster-aws}/run_and_tag.py | 0 {bigchain-deployment => deploy-cluster-aws}/startup.sh | 0 11 files changed, 0 insertions(+), 0 deletions(-) rename {bigchain-deployment => deploy-cluster-aws}/Readme.md (100%) rename {bigchain-deployment => deploy-cluster-aws}/bigchain/bigchaindb.conf (100%) rename {bigchain-deployment => deploy-cluster-aws}/bigchain/bigchaindb.conf.template (100%) rename {bigchain-deployment => deploy-cluster-aws}/create_hostlist.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/fab_prepare_chain.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/fabfile.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/get_elastic_ips.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/get_instance_status.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/hostlist.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/run_and_tag.py (100%) rename {bigchain-deployment => deploy-cluster-aws}/startup.sh (100%) diff --git a/bigchain-deployment/Readme.md b/deploy-cluster-aws/Readme.md similarity index 100% rename from bigchain-deployment/Readme.md rename to deploy-cluster-aws/Readme.md diff --git a/bigchain-deployment/bigchain/bigchaindb.conf b/deploy-cluster-aws/bigchain/bigchaindb.conf similarity index 
100% rename from bigchain-deployment/bigchain/bigchaindb.conf rename to deploy-cluster-aws/bigchain/bigchaindb.conf diff --git a/bigchain-deployment/bigchain/bigchaindb.conf.template b/deploy-cluster-aws/bigchain/bigchaindb.conf.template similarity index 100% rename from bigchain-deployment/bigchain/bigchaindb.conf.template rename to deploy-cluster-aws/bigchain/bigchaindb.conf.template diff --git a/bigchain-deployment/create_hostlist.py b/deploy-cluster-aws/create_hostlist.py similarity index 100% rename from bigchain-deployment/create_hostlist.py rename to deploy-cluster-aws/create_hostlist.py diff --git a/bigchain-deployment/fab_prepare_chain.py b/deploy-cluster-aws/fab_prepare_chain.py similarity index 100% rename from bigchain-deployment/fab_prepare_chain.py rename to deploy-cluster-aws/fab_prepare_chain.py diff --git a/bigchain-deployment/fabfile.py b/deploy-cluster-aws/fabfile.py similarity index 100% rename from bigchain-deployment/fabfile.py rename to deploy-cluster-aws/fabfile.py diff --git a/bigchain-deployment/get_elastic_ips.py b/deploy-cluster-aws/get_elastic_ips.py similarity index 100% rename from bigchain-deployment/get_elastic_ips.py rename to deploy-cluster-aws/get_elastic_ips.py diff --git a/bigchain-deployment/get_instance_status.py b/deploy-cluster-aws/get_instance_status.py similarity index 100% rename from bigchain-deployment/get_instance_status.py rename to deploy-cluster-aws/get_instance_status.py diff --git a/bigchain-deployment/hostlist.py b/deploy-cluster-aws/hostlist.py similarity index 100% rename from bigchain-deployment/hostlist.py rename to deploy-cluster-aws/hostlist.py diff --git a/bigchain-deployment/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py similarity index 100% rename from bigchain-deployment/run_and_tag.py rename to deploy-cluster-aws/run_and_tag.py diff --git a/bigchain-deployment/startup.sh b/deploy-cluster-aws/startup.sh similarity index 100% rename from bigchain-deployment/startup.sh rename to 
deploy-cluster-aws/startup.sh From 4c5e5db149b4dd87bf1cb278c7392e291e23475f Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 15:33:05 +0100 Subject: [PATCH 04/53] Rename deploy-cluster-aws/bigchain dir to .../conf --- deploy-cluster-aws/{bigchain => conf}/bigchaindb.conf | 0 .../{bigchain => conf}/bigchaindb.conf.template | 0 deploy-cluster-aws/fabfile.py | 2 +- deploy-cluster-aws/startup.sh | 6 +++--- 4 files changed, 4 insertions(+), 4 deletions(-) rename deploy-cluster-aws/{bigchain => conf}/bigchaindb.conf (100%) rename deploy-cluster-aws/{bigchain => conf}/bigchaindb.conf.template (100%) diff --git a/deploy-cluster-aws/bigchain/bigchaindb.conf b/deploy-cluster-aws/conf/bigchaindb.conf similarity index 100% rename from deploy-cluster-aws/bigchain/bigchaindb.conf rename to deploy-cluster-aws/conf/bigchaindb.conf diff --git a/deploy-cluster-aws/bigchain/bigchaindb.conf.template b/deploy-cluster-aws/conf/bigchaindb.conf.template similarity index 100% rename from deploy-cluster-aws/bigchain/bigchaindb.conf.template rename to deploy-cluster-aws/conf/bigchaindb.conf.template diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 47eb0a81..7af04d32 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -87,7 +87,7 @@ def install_rethinkdb(): # change fs to user sudo('chown -R rethinkdb:rethinkdb /data') # copy config file to target system - put('bigchain/bigchaindb.conf', + put('conf/bigchaindb.conf', '/etc/rethinkdb/instances.d/instance1.conf', mode=0600, use_sudo=True) # initialize data-dir sudo('rm -rf /data/*') diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 2f946e01..7feae98d 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -73,8 +73,8 @@ chmod +x add2known_hosts.sh ./add2known_hosts.sh # resetting the rethinkdb initfile and adding the nodes to join... 
-cp bigchain/bigchaindb.conf.template bigchain/bigchaindb.conf -cat add2dbconf >> bigchain/bigchaindb.conf +cp conf/bigchaindb.conf.template conf/bigchaindb.conf +cat add2dbconf >> conf/bigchaindb.conf # rollout base packages for installation of storage and bigchain fab install_base_software @@ -86,7 +86,7 @@ fab install_rethinkdb fab install_bigchain # generate genesisblock -HORST=`tail -1 bigchain/bigchaindb.conf|cut -d: -f1|cut -d= -f2` +HORST=`tail -1 conf/bigchaindb.conf|cut -d: -f1|cut -d= -f2` fab -H $HORST -f fab_prepare_chain.py init_bigchaindb # initiate sharding fab start_bigchain_nodes From 5fb6be064bc17d9d4f07464756cd9d9602913d1d Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 17:12:01 +0100 Subject: [PATCH 05/53] Add some files created for AWS deployment to .gitignore --- .gitignore | 4 + deploy-cluster-aws/conf/bigchaindb.conf | 107 ------------------------ deploy-cluster-aws/hostlist.py | 1 - 3 files changed, 4 insertions(+), 108 deletions(-) delete mode 100644 deploy-cluster-aws/conf/bigchaindb.conf delete mode 100644 deploy-cluster-aws/hostlist.py diff --git a/.gitignore b/.gitignore index 16bf3f03..ae5d85fa 100644 --- a/.gitignore +++ b/.gitignore @@ -67,3 +67,7 @@ target/ # Private key files from AWS *.pem + +# Some files created when deploying a cluster on AWS +deploy-cluster-aws/conf/bigchaindb.conf +deploy-cluster-aws/hostlist.py diff --git a/deploy-cluster-aws/conf/bigchaindb.conf b/deploy-cluster-aws/conf/bigchaindb.conf deleted file mode 100644 index 3d64abb5..00000000 --- a/deploy-cluster-aws/conf/bigchaindb.conf +++ /dev/null @@ -1,107 +0,0 @@ -# -# RethinkDB instance configuration sample -# -# - Give this file the extension .conf and put it in /etc/rethinkdb/instances.d in order to enable it. -# - See http://www.rethinkdb.com/docs/guides/startup/ for the complete documentation -# - Uncomment an option to change its value. 
-# - -############################### -## RethinkDB configuration -############################### - -### Process options - -## User and group used to run rethinkdb -## Command line default: do not change user or group -## Init script default: rethinkdb user and group -# runuser=rethinkdb -# rungroup=rethinkdb - -## Stash the pid in this file when the process is running -## Note for systemd users: Systemd uses its own internal mechanism. Do not set this parameter. -## Command line default: none -## Init script default: /var/run/rethinkdb//pid_file (where is the name of this config file without the extension) -# pid-file=/var/run/rethinkdb/rethinkdb.pid - -### File path options - -## Directory to store data and metadata -## Command line default: ./rethinkdb_data -## Init script default: /var/lib/rethinkdb// (where is the name of this file without the extension) -directory=/data - -## Log file options -## Default: /log_file -#log-file=/var/log/rethinkdb - -### Network options - -## Address of local interfaces to listen on when accepting connections -## May be 'all' or an IP address, loopback addresses are enabled by default -## Default: all local addresses -# bind=127.0.0.1 -bind=all - -## Address that other rethinkdb instances will use to connect to this server. -## It can be specified multiple times -# canonical-address= - -## The port for rethinkdb protocol for client drivers -## Default: 28015 + port-offset -# driver-port=28015 - -## The port for receiving connections from other nodes -## Default: 29015 + port-offset -# cluster-port=29015 - -## The host:port of a node that rethinkdb will connect to -## This option can be specified multiple times. -## Default: none -# join=example.com:29015 - -## All ports used locally will have this value added -## Default: 0 -# port-offset=0 - -## r.http(...) 
queries will use the given server as a web proxy -## Default: no proxy -# reql-http-proxy=socks5://example.com:1080 - -### Web options - -## Port for the http admin console -## Default: 8080 + port-offset -# http-port=8080 - -## Disable web administration console -# no-http-admin - -### CPU options - -## The number of cores to use -## Default: total number of cores of the CPU -# cores=2 - -### Memory options - -## Size of the cache in MB -## Default: Half of the available RAM on startup -# cache-size=1024 - -### Disk - -## How many simultaneous I/O operations can happen at the same time -# io-threads=64 -#io-threads=128 - -## Enable direct I/O -direct-io - -### Meta - -## The name for this server (as will appear in the metadata). -## If not specified, it will be randomly chosen from a short list of names. -# server-name=server1 -## The host:port of a node that rethinkdb will connect to -join=ec2-52-58-94-210.eu-central-1.compute.amazonaws.com:29015 diff --git a/deploy-cluster-aws/hostlist.py b/deploy-cluster-aws/hostlist.py deleted file mode 100644 index b32f8ca7..00000000 --- a/deploy-cluster-aws/hostlist.py +++ /dev/null @@ -1 +0,0 @@ -hosts_dev = ['ec2-52-58-94-210.eu-central-1.compute.amazonaws.com', 'ec2-52-29-236-2.eu-central-1.compute.amazonaws.com'] From b1493979742b3a71b99e89957efeee72bcf2e38d Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 17:27:48 +0100 Subject: [PATCH 06/53] Comment-out 2 lines about BIGCHAINDIR in startup.sh --- deploy-cluster-aws/startup.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 7feae98d..1510d23f 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -23,8 +23,10 @@ NODES=$2 AWS=`which aws` FAB=`which fab` DEPLOYDIR=`pwd` -BIGCHAINDIR=`dirname $DEPLOYDIR` -export BIGCHAINDIR +# It seems BIGCHAINDIR was never used, but I wasn't sure +# so I just commented-out the following two lines. 
-Troy +#BIGCHAINDIR=`dirname $DEPLOYDIR` +#export BIGCHAINDIR # check if awscli is installed if [ ! -f "$AWS" ] From 85580aba07fea1fbd8c73a1ae8b396ca873f80f6 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 17:53:53 +0100 Subject: [PATCH 07/53] Change expected name of pem file to bigchaindb.pem --- deploy-cluster-aws/fab_prepare_chain.py | 2 +- deploy-cluster-aws/fabfile.py | 2 +- deploy-cluster-aws/startup.sh | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/deploy-cluster-aws/fab_prepare_chain.py b/deploy-cluster-aws/fab_prepare_chain.py index 98aedd7d..dfe49fb2 100644 --- a/deploy-cluster-aws/fab_prepare_chain.py +++ b/deploy-cluster-aws/fab_prepare_chain.py @@ -21,7 +21,7 @@ from fabric.decorators import roles from fabtools import * env.user = 'ubuntu' -env.key_filename = 'pem/bigchain.pem' +env.key_filename = 'pem/bigchaindb.pem' @task def init_bigchaindb(): diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 7af04d32..4019156a 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -37,7 +37,7 @@ env.roledefs = { } env.roles = ["role1"] env.user = 'ubuntu' -env.key_filename = 'pem/bigchain.pem' +env.key_filename = 'pem/bigchaindb.pem' ################################################################################ diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 1510d23f..5a2a9c59 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -43,13 +43,13 @@ if [ ! -f "$FAB" ] fi # checking pem-file and changing access rights -if [ ! -f "pem/bigchain.pem" ] +if [ ! -f "pem/bigchaindb.pem" ] then echo "Valid key is missing!!!" 
exit 1 fi -# 0400 for pem/bigchain.pem -chmod 0400 pem/bigchain.pem +# 0400 for pem/bigchaindb.pem +chmod 0400 pem/bigchaindb.pem # starting and tagging instances python3 run_and_tag.py --tag $TAG --nodes $NODES From 63a0e1e10bd842a40967520620710f53da8857eb Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 18:12:48 +0100 Subject: [PATCH 08/53] Rename install_bigchain to install_bigchaindb --- deploy-cluster-aws/fabfile.py | 4 ++-- deploy-cluster-aws/startup.sh | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 4019156a..2d261603 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -95,10 +95,10 @@ def install_rethinkdb(): sudo('/etc/init.d/rethinkdb restart') -# bigchain deployment +# bigchaindb deployment @task @parallel -def install_bigchain(): +def install_bigchaindb(): sudo('python3 -m pip install bigchaindb') diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 5a2a9c59..71f72609 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -78,14 +78,15 @@ chmod +x add2known_hosts.sh cp conf/bigchaindb.conf.template conf/bigchaindb.conf cat add2dbconf >> conf/bigchaindb.conf -# rollout base packages for installation of storage and bigchain +# rollout base packages (dependencies) needed before +# storage backend (rethinkdb) and bigchaindb can be rolled out fab install_base_software -# rollout storagebackend (rethinkdb) +# rollout storage backend (rethinkdb) fab install_rethinkdb -# rollout bigchain-reporitory -fab install_bigchain +# rollout bigchaindb +fab install_bigchaindb # generate genesisblock HORST=`tail -1 conf/bigchaindb.conf|cut -d: -f1|cut -d= -f2` From 4a5e8f825a4d761cdd5877cf6b891fc154c1be2b Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 18:29:08 +0100 Subject: [PATCH 09/53] Rename start_bigchain_nodes to start_bigchaindb_nodes --- 
deploy-cluster-aws/fabfile.py | 2 +- deploy-cluster-aws/startup.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 2d261603..4389462d 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -105,7 +105,7 @@ def install_bigchaindb(): # startup all nodes of bigchaindb in cluster @task @parallel -def start_bigchain_nodes(): +def start_bigchaindb_nodes(): sudo('screen -d -m bigchaindb -y start &', pty = False) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 71f72609..a5c2215a 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -92,7 +92,7 @@ fab install_bigchaindb HORST=`tail -1 conf/bigchaindb.conf|cut -d: -f1|cut -d= -f2` fab -H $HORST -f fab_prepare_chain.py init_bigchaindb # initiate sharding -fab start_bigchain_nodes +fab start_bigchaindb_nodes # now cleanup! rm add2known_hosts.sh add2dbconf From 2ab76e87b9cb67d55f80a73e75852ee63db7ddc7 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 18:38:29 +0100 Subject: [PATCH 10/53] Rewrote fabfile.py docstring --- deploy-cluster-aws/fabfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 4389462d..a327b301 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -1,8 +1,8 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" Preparing, installing and configuring - bigchain and the storage backend +"""A fabfile with functionality to prepare, install, and configure +bigchaindb, including its storage backend. 
""" from __future__ import with_statement From f65e73e4fa035a98abb8a0e7630f2e1995764e8e Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 18:50:47 +0100 Subject: [PATCH 11/53] Change AWS key_name to bigchaindb in run_and_tag.py --- deploy-cluster-aws/run_and_tag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py index aec080aa..4d29c949 100644 --- a/deploy-cluster-aws/run_and_tag.py +++ b/deploy-cluster-aws/run_and_tag.py @@ -37,7 +37,7 @@ for _ in range(nodes): # 0, 1, ..., (nodes-1) = nodes items reservation = conn.run_instances( 'ami-accff2b1', # ubuntu-image #'ami-596b7235', # ubuntu w/ iops storage - key_name='bigchain', + key_name='bigchaindb', # IMPORTANT!!!! - here you change the machine type for the cluster instance_type='m3.2xlarge', #instance_type='c3.8xlarge', From cd05a9851541a16844b877185fe84ff6fa0962c8 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 23 Mar 2016 19:14:13 +0100 Subject: [PATCH 12/53] Rename AWS security group to bigchaindb in run_and_tag.py --- deploy-cluster-aws/run_and_tag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py index 4d29c949..e2cf0f0d 100644 --- a/deploy-cluster-aws/run_and_tag.py +++ b/deploy-cluster-aws/run_and_tag.py @@ -42,7 +42,7 @@ for _ in range(nodes): # 0, 1, ..., (nodes-1) = nodes items instance_type='m3.2xlarge', #instance_type='c3.8xlarge', #instance_type='c4.8xlarge', - security_groups=['bigchain']) + security_groups=['bigchaindb']) for instance in reservation.instances: time.sleep(5) From 0dcfa8045cd22804ef5fb975aa11dd0cd21872f9 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 24 Mar 2016 10:03:12 +0100 Subject: [PATCH 13/53] Better error message when pem/bigchaindb.pem missing --- deploy-cluster-aws/startup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy-cluster-aws/startup.sh 
b/deploy-cluster-aws/startup.sh index a5c2215a..4f4a3452 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -45,7 +45,7 @@ fi # checking pem-file and changing access rights if [ ! -f "pem/bigchaindb.pem" ] then - echo "Valid key is missing!!!" + echo "File pem/bigchaindb.pem (AWS private key) is missing" exit 1 fi # 0400 for pem/bigchaindb.pem From 842b534876aa76fa1542944b4b7ad389bb116911 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 24 Mar 2016 10:04:15 +0100 Subject: [PATCH 14/53] Notes in Readme.md in prep for migration to boto3 --- deploy-cluster-aws/Readme.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/deploy-cluster-aws/Readme.md b/deploy-cluster-aws/Readme.md index 359b0898..a821cadd 100644 --- a/deploy-cluster-aws/Readme.md +++ b/deploy-cluster-aws/Readme.md @@ -5,9 +5,10 @@ #### Prerequesites - Valid AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY is needed, both are exported as variables to the shell - - awscli, - - boto - - fabric w/ fabtools + - awscli - DO WE EVEN USE THIS? + - boto - DEPENDENCY BEING REMOVED + - boto3 (Python 2 or 3) + - fabric w/ fabtools (Python 2 only) #### Cluster Installation - Got to the DEPLOY-directory and run './startup.sh' with two parameters (tag and number of nodes)...that's it! 
From f46fb86ec49ba89e51976fde91b1afc6e4071216 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 24 Mar 2016 11:47:23 +0100 Subject: [PATCH 15/53] Migrate run_and_tag.py from boto to boto3 --- deploy-cluster-aws/run_and_tag.py | 56 ++++++++++++++++++------------- deploy-cluster-aws/startup.sh | 2 +- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py index e2cf0f0d..717e1653 100644 --- a/deploy-cluster-aws/run_and_tag.py +++ b/deploy-cluster-aws/run_and_tag.py @@ -1,49 +1,59 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- -import boto.ec2 +import boto3 import time import argparse import os -AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] -AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] +AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] +AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] +AWS_REGION = os.environ['AWS_REGION'] parser = argparse.ArgumentParser() -parser.add_argument("--tag", help="tag instances in aws") -parser.add_argument("--nodes", help="number of nodes in the cluster") +parser.add_argument("--tag", + help="tag to add to all launched instances on AWS") +parser.add_argument("--nodes", + help="number of nodes in the cluster") args = parser.parse_args() if args.tag: tag = args.tag else: # reading credentials from config for remote connection - print('usage: python3 run_and_tag.py --tag --nodes ') + print('usage: python run_and_tag.py --tag --nodes ') print('reason: tag missing!!!') exit(1) if args.nodes: nodes = int(args.nodes) else: - print('usage: python3 run_and_tag.py --tag --nodes ') + print('usage: python run_and_tag.py --tag --nodes ') print('reason: nodes missing!!!') exit(1) -conn = boto.ec2.connect_to_region("eu-central-1", - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) +# Connect to Amazon EC2 +ec2 = boto3.resource(service_name='ec2', + region_name=AWS_REGION, + 
aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) -for _ in range(nodes): # 0, 1, ..., (nodes-1) = nodes items - reservation = conn.run_instances( - 'ami-accff2b1', # ubuntu-image - #'ami-596b7235', # ubuntu w/ iops storage - key_name='bigchaindb', - # IMPORTANT!!!! - here you change the machine type for the cluster - instance_type='m3.2xlarge', - #instance_type='c3.8xlarge', - #instance_type='c4.8xlarge', - security_groups=['bigchaindb']) - for instance in reservation.instances: +for _ in range(nodes): # = [0, 1, ..., (nodes-1)] + # Request the launch of one instance at a time + # (so list_of_instances should contain only one item) + list_of_instances = ec2.create_instances( + ImageId='ami-accff2b1', # ubuntu-image + # 'ami-596b7235', # ubuntu w/ iops storage + MinCount=1, + MaxCount=1, + KeyName='bigchaindb', + InstanceType='m3.2xlarge', + # 'c3.8xlarge', + # 'c4.8xlarge', + SecurityGroupIds=['bigchaindb'] + ) + + # Tag the just-launched instances (should be just one) + for instance in list_of_instances: time.sleep(5) - instance.add_tag('Name', tag) + instance.create_tags(Tags=[{'Key': 'Name', 'Value': tag}]) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 4f4a3452..e1e5334b 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -52,7 +52,7 @@ fi chmod 0400 pem/bigchaindb.pem # starting and tagging instances -python3 run_and_tag.py --tag $TAG --nodes $NODES +python run_and_tag.py --tag $TAG --nodes $NODES # let's wait a minute to get the nodes ready and in status initializing #sleep 60 From c0a7be618cffcd068ccc79f54f805cbb7817a11f Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 24 Mar 2016 19:39:33 +0100 Subject: [PATCH 16/53] Revised how wait-for-running was done in AWS deployment --- deploy-cluster-aws/get_instance_status.py | 99 -------------------- deploy-cluster-aws/startup.sh | 14 ++- deploy-cluster-aws/wait_until_all_running.py | 45 +++++++++ 3 files changed, 51 
insertions(+), 107 deletions(-) delete mode 100644 deploy-cluster-aws/get_instance_status.py create mode 100644 deploy-cluster-aws/wait_until_all_running.py diff --git a/deploy-cluster-aws/get_instance_status.py b/deploy-cluster-aws/get_instance_status.py deleted file mode 100644 index 175c600d..00000000 --- a/deploy-cluster-aws/get_instance_status.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import json -import os -import boto.ec2 -import time -import argparse - - -AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] -AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] - -parser = argparse.ArgumentParser() -parser.add_argument("--tag", help="tag instances in aws") -args = parser.parse_args() - -if args.tag: - tag = args.tag -else: - # reading credentials from config for remote connection - print('usage: python3 get_instance_status.py --tag ') - print('reason: tag missing!!!') - exit(1) - -conn = boto.ec2.connect_to_region("eu-central-1", - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - -PASSING = [] -RUNNING = [] - - -# get list of instance ids from amazon -def list_of_ids(tag): - # TODO: CHANGE TO PROPER DOCSTRING - # Returns a list of ids of all instances with the given tag - reservations = conn.get_all_instances(filters={"tag:Name": tag}) - # There are several reservations - # and each reservation can have several instances - id_list = [] - for reservation in reservations: - for instance in reservation.instances: - if instance.id is not None: - id_list.append(instance.id) - return id_list - - -# Andreas' old code: -""" -INSTANCE_IDS = [] - -def prepare_list(tag): - reservations = conn.get_all_instances(filters={"tag:Name" : tag}) - instances = [i for r in reservations for i in r.instances] - for i in instances: - inst = i.__dict__ - inst_id = inst.get('id') - INSTANCE_IDS.append(inst_id) - return INSTANCE_IDS -""" - - -# get statuses from amazon -def 
create_run_pass_list(tag): - # instlist_old = prepare_list(tag) - # print("instlist_old = {}".format(instlist_old)) - instlist_new = list_of_ids(tag) - print("instlist_new = {}".format(instlist_new)) - - instlist = instlist_new - - for entry in range(0, len(instlist)): # 0, 1, ..., [len(instlist) - 1] - instances = conn.get_only_instances(instance_ids=instlist[entry]) - status = conn.get_all_instance_status(instance_ids=instlist[entry]) - for instance in instances: - reachability = status[0].system_status.details["reachability"] - PASSING.append(reachability) - return instlist, PASSING, RUNNING - - -if __name__ == "__main__": - # get lists from amazon - try: - instlist, passlist, runlist = create_run_pass_list(tag) - print("instlist = {}".format(instlist)) - print("passlist = {}".format(passlist)) - print("runlist = {}".format(runlist)) - except IndexError: - print("Searching for matching cluster-tag...") - exit(1) - - for entry in range(0,len(instlist)): - if "passed" in passlist and len(set(passlist)) == 1: - print("up and running") - exit(0) - - # exit with error code for continous check if nothing found - exit(1) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index e1e5334b..1cb5de28 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -1,5 +1,8 @@ #! /bin/bash +# The set -e option instructs bash to immediately exit if any command has a non-zero exit status +set -e + function printErr() { echo "usage: ./startup.sh " @@ -51,18 +54,13 @@ fi # 0400 for pem/bigchaindb.pem chmod 0400 pem/bigchaindb.pem -# starting and tagging instances +echo "Starting and tagging instances on EC2" python run_and_tag.py --tag $TAG --nodes $NODES # let's wait a minute to get the nodes ready and in status initializing #sleep 60 -# checking if instances are up and running (every 5 secs.) -RET=1 -until [ ${RET} -eq 0 ]; do - python3 get_instance_status.py --tag $TAG - RET=$? 
- sleep 5 -done +# Wait until all those instances are running +python wait_until_all_running.py --tag $TAG # in case of elastic ips... python3 get_elastic_ips.py --tag $TAG diff --git a/deploy-cluster-aws/wait_until_all_running.py b/deploy-cluster-aws/wait_until_all_running.py new file mode 100644 index 00000000..3a3ec2fb --- /dev/null +++ b/deploy-cluster-aws/wait_until_all_running.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +import os +import boto3 +import argparse + + +AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] +AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] +AWS_REGION = os.environ['AWS_REGION'] + +parser = argparse.ArgumentParser() +parser.add_argument("--tag", help="tag instances in aws") +args = parser.parse_args() + +if args.tag: + tag = args.tag +else: + # reading credentials from config for remote connection + print('usage: python get_instance_status.py --tag ') + print('reason: tag missing!!!') + exit(1) + +# Connect to Amazon EC2 +ec2 = boto3.resource(service_name='ec2', + region_name=AWS_REGION, + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) + +# Get a list of all instances with the specified tag. +# (Technically, instances_with_tag is an ec2.instancesCollection.) 
+instances_with_tag = ec2.instances.filter( + Filters=[{'Name': 'tag:Name', 'Values': [tag]}] + ) +print('The instances with tag {} have these ids:'.format(tag)) +for instance in instances_with_tag: + print(instance.id) + +print('Waiting until all those instances exist...') +for instance in instances_with_tag: + instance.wait_until_exists() + +print('Waiting until all those instances are running...') +for instance in instances_with_tag: + instance.wait_until_running() From 48501fa99388c5c7d3b84b19b935efcda01ef661 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 24 Mar 2016 19:43:43 +0100 Subject: [PATCH 17/53] Minor cleanup and consistency revisions --- deploy-cluster-aws/run_and_tag.py | 6 ++++-- deploy-cluster-aws/startup.sh | 5 ++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py index 717e1653..dc495026 100644 --- a/deploy-cluster-aws/run_and_tag.py +++ b/deploy-cluster-aws/run_and_tag.py @@ -25,7 +25,7 @@ else: exit(1) if args.nodes: - nodes = int(args.nodes) + num_nodes = int(args.nodes) else: print('usage: python run_and_tag.py --tag --nodes ') print('reason: nodes missing!!!') @@ -38,7 +38,9 @@ ec2 = boto3.resource(service_name='ec2', aws_secret_access_key=AWS_SECRET_ACCESS_KEY) -for _ in range(nodes): # = [0, 1, ..., (nodes-1)] +print('Starting and tagging instances on Amazon EC2...') + +for _ in range(num_nodes): # Request the launch of one instance at a time # (so list_of_instances should contain only one item) list_of_instances = ec2.create_instances( diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 1cb5de28..c97a6b33 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -54,10 +54,9 @@ fi # 0400 for pem/bigchaindb.pem chmod 0400 pem/bigchaindb.pem -echo "Starting and tagging instances on EC2" +# Start the specified number of nodes +# and tag them with the specified tag python run_and_tag.py --tag $TAG --nodes 
$NODES -# let's wait a minute to get the nodes ready and in status initializing -#sleep 60 # Wait until all those instances are running python wait_until_all_running.py --tag $TAG From 810b4bf120553025a772de628114634803e7202f Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 09:50:29 +0200 Subject: [PATCH 18/53] Consistent headers in .py files for AWS deployment --- deploy-cluster-aws/create_hostlist.py | 2 +- deploy-cluster-aws/fab_prepare_chain.py | 3 +-- deploy-cluster-aws/fabfile.py | 3 +-- deploy-cluster-aws/get_elastic_ips.py | 3 ++- deploy-cluster-aws/run_and_tag.py | 1 + deploy-cluster-aws/wait_until_all_running.py | 1 + 6 files changed, 7 insertions(+), 6 deletions(-) diff --git a/deploy-cluster-aws/create_hostlist.py b/deploy-cluster-aws/create_hostlist.py index 5793d06f..36a79b5d 100644 --- a/deploy-cluster-aws/create_hostlist.py +++ b/deploy-cluster-aws/create_hostlist.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- +from __future__ import unicode_literals import json import argparse import boto.ec2 diff --git a/deploy-cluster-aws/fab_prepare_chain.py b/deploy-cluster-aws/fab_prepare_chain.py index dfe49fb2..caa827ad 100644 --- a/deploy-cluster-aws/fab_prepare_chain.py +++ b/deploy-cluster-aws/fab_prepare_chain.py @@ -1,10 +1,9 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- """ Generating genesis block """ -from __future__ import with_statement +from __future__ import with_statement, unicode_literals from fabric import colors as c from fabric.api import * diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index a327b301..dff3ab75 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -1,11 +1,10 @@ -#!/usr/bin/env python # -*- coding: utf-8 -*- """A fabfile with functionality to prepare, install, and configure bigchaindb, including its storage backend. 
""" -from __future__ import with_statement +from __future__ import with_statement, unicode_literals import requests from time import * diff --git a/deploy-cluster-aws/get_elastic_ips.py b/deploy-cluster-aws/get_elastic_ips.py index 5d308d3c..73daf9aa 100644 --- a/deploy-cluster-aws/get_elastic_ips.py +++ b/deploy-cluster-aws/get_elastic_ips.py @@ -1,7 +1,8 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- # create Elastic IPs and assign them to instances if needed + +from __future__ import unicode_literals import json import os import boto.ec2 diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py index dc495026..c1eab98e 100644 --- a/deploy-cluster-aws/run_and_tag.py +++ b/deploy-cluster-aws/run_and_tag.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +from __future__ import unicode_literals import boto3 import time import argparse diff --git a/deploy-cluster-aws/wait_until_all_running.py b/deploy-cluster-aws/wait_until_all_running.py index 3a3ec2fb..732ce617 100644 --- a/deploy-cluster-aws/wait_until_all_running.py +++ b/deploy-cluster-aws/wait_until_all_running.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +from __future__ import unicode_literals import os import boto3 import argparse From 8cebcfb19500dac080c73ff1013d566c20a37671 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 11:53:38 +0200 Subject: [PATCH 19/53] Acquire & assign elastic IPs the boto3 way --- deploy-cluster-aws/get_elastic_ips.py | 76 +++++++++++++-------------- deploy-cluster-aws/startup.sh | 3 +- 2 files changed, 39 insertions(+), 40 deletions(-) diff --git a/deploy-cluster-aws/get_elastic_ips.py b/deploy-cluster-aws/get_elastic_ips.py index 73daf9aa..9976eb27 100644 --- a/deploy-cluster-aws/get_elastic_ips.py +++ b/deploy-cluster-aws/get_elastic_ips.py @@ -1,16 +1,17 @@ # -*- coding: utf-8 -*- -# create Elastic IPs and assign them to instances if needed +""" Create Elastic IPs and assign them to instances if needed. 
+""" from __future__ import unicode_literals -import json import os -import boto.ec2 +import boto3 import argparse import time -AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] -AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] +AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] +AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] +AWS_REGION = os.environ['AWS_REGION'] parser = argparse.ArgumentParser() parser.add_argument("--tag", help="tag instances in aws") @@ -20,46 +21,45 @@ if args.tag: tag = args.tag else: # reading credentials from config for remote connection - print('usage: python3 get_elastic_ips.py --tag ') + print('usage: python get_elastic_ips.py --tag ') print('reason: tag missing!!!') exit(1) -conn = boto.ec2.connect_to_region("eu-central-1", - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - -INSTANCE_IDS = [] +# Connect to Amazon EC2 +ec2 = boto3.resource(service_name='ec2', + region_name=AWS_REGION, + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) -def prepare_list(tag): - reservations = conn.get_all_instances(filters={"tag:Name" : tag}) - instances = [i for r in reservations for i in r.instances] - for i in instances: - inst = i.__dict__ - #print (inst) - #break - inst_id = inst.get('id') +# Get a list of all instances with the specified tag. +# (Technically, instances_with_tag is an ec2.instancesCollection.) +instances_with_tag = ec2.instances.filter( + Filters=[{'Name': 'tag:Name', 'Values': [tag]}] + ) - INSTANCE_IDS.append(inst_id) - return INSTANCE_IDS +print('Allocating elastic IP addresses and assigning them to the instances...') +for instance in instances_with_tag: + # Create a client from the ec2 resource + # See http://boto3.readthedocs.org/en/latest/guide/clients.html + client = ec2.meta.client -def get_new_pubDNS(): - eip = conn.allocate_address() - return eip + # Acquire an Elastic IP address + # response is a dict. 
See http://tinyurl.com/z2n7u9k + response = client.allocate_address(DryRun=False, Domain='standard') + public_ip = response['PublicIp'] + print('public_ip = {}'.format(public_ip)) -if __name__ == "__main__": - # hostlist.tmp (JSON) erzeugen - instlist = prepare_list(tag) + # Associate that Elastic IP address with an instance + response2 = client.associate_address( + DryRun=False, + InstanceId=instance.instance_id, + PublicIp=public_ip + ) + print('was associated with the instance with id {}'. + format(instance.instance_id)) - for entry in range(0,len(instlist)): - - instance_id = instlist[entry] - print(instance_id) - newpubDNS = get_new_pubDNS() - inID = str(newpubDNS).split(':')[1] - print(inID) - conn.associate_address(instance_id, public_ip=inID) - - # make sure all addresse are assigned... - time.sleep(30) +# Make sure all IP addresses are assigned... +print('Waiting 30 seconds to make sure all IP addresses are assigned...') +time.sleep(30) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index c97a6b33..059b576d 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -62,9 +62,8 @@ python run_and_tag.py --tag $TAG --nodes $NODES python wait_until_all_running.py --tag $TAG # in case of elastic ips... -python3 get_elastic_ips.py --tag $TAG +python get_elastic_ips.py --tag $TAG -# everything prepared. now wait until instances up and running! 
# generate hostlist.py and add_keys.sh python3 create_hostlist.py --tag $TAG > hostlist.py # make add_keys executable and execute From a9edd9b629f580c34fbad2ea9612125b1137478c Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 14:40:07 +0200 Subject: [PATCH 20/53] Revised create_hostlist.py to use boto3 & nicer Python --- deploy-cluster-aws/create_hostlist.py | 93 ++++++++++++++------------- deploy-cluster-aws/startup.sh | 10 +-- 2 files changed, 54 insertions(+), 49 deletions(-) diff --git a/deploy-cluster-aws/create_hostlist.py b/deploy-cluster-aws/create_hostlist.py index 36a79b5d..0f5253a3 100644 --- a/deploy-cluster-aws/create_hostlist.py +++ b/deploy-cluster-aws/create_hostlist.py @@ -1,68 +1,71 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals -import json +# from __future__ import unicode_literals import argparse -import boto.ec2 +import boto3 import os -AWS_ACCESS_KEY_ID = os.environ["AWS_ACCESS_KEY_ID"] -AWS_SECRET_ACCESS_KEY = os.environ["AWS_SECRET_ACCESS_KEY"] +AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] +AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] +AWS_REGION = os.environ['AWS_REGION'] parser = argparse.ArgumentParser() parser.add_argument("--tag", help="tag instances in aws") args = parser.parse_args() -conn = boto.ec2.connect_to_region("eu-central-1", - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - -PUBLIC_LIST = [] -PRIVATE_LIST = [] -INSTANCE_IDS = [] - if args.tag: tag = args.tag else: # reading credentials from config for remote connection - print('usage: python3 create_hostlist.py --tag ') + print('usage: python create_hostlist.py --tag ') print('reason: tag missing!!!') exit(1) +# Connect to Amazon EC2 +ec2 = boto3.resource(service_name='ec2', + region_name=AWS_REGION, + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) -def prepare_list(tag): - reservations = conn.get_all_instances(filters={"tag:Name" : tag}) - 
instances = [i for r in reservations for i in r.instances] - for i in instances: - inst = i.__dict__ - publdns = inst.get('public_dns_name') - privdns = inst.get('private_dns_name') - inst_id = inst.get('id') - PUBLIC_LIST.append(publdns) - PRIVATE_LIST.append(privdns) - INSTANCE_IDS.append(inst_id) - return PUBLIC_LIST, PRIVATE_LIST, INSTANCE_IDS +# Get a list of all instances with the specified tag. +# (Technically, instances_with_tag is an ec2.instancesCollection.) +instances_with_tag = ec2.instances.filter( + Filters=[{'Name': 'tag:Name', 'Values': [tag]}] + ) +publist = [] +for instance in instances_with_tag: + public_dns_name = getattr(instance, 'public_dns_name', None) + if public_dns_name is not None: + publist.append(public_dns_name) -# get lists from amazon -publist, privlist, instlist = prepare_list(tag) +# Create shellscript add2known_hosts.sh for adding remote keys to known_hosts +with open('add2known_hosts.sh', 'w') as f: + f.write('#! /bin/bash\n') + for public_dns_name in publist: + f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n') -# create shellscript for adding remote keys to known_hosts -localFile = open('add2known_hosts.sh', 'w') -localFile.write('#! 
/bin/bash\n') -for entry in range(0,len(publist)): - localFile.write('ssh-keyscan ' + publist[entry] + ' >> ~/.ssh/known_hosts\n') -localFile.close() +# Create a file named add2dbconf, overwriting one if it already exists +with open('add2dbconf', 'w') as f: + f.write('## The host:port of a node that RethinkDB will connect to\n') + for public_dns_name in publist: + f.write('join=' + public_dns_name + ':29015\n') -# hostliste und id-liste aus json erzeugen -hosts = publist -localFile = open('add2dbconf', 'w') -before = 'join=' -after = ':29015' -localFile.write('## The host:port of a node that rethinkdb will connect to\n') -for entry in range(0,int(len(hosts)/2)): - localFile.write(before + hosts[entry] + after + '\n') +# Note: The original code by Andreas wrote a file with lines of the form +# join=public_dns_name_0:29015 +# join=public_dns_name_1:29015 +# but it stopped about halfway through the list of public_dns_names +# (publist). In principle, it's only strictly necessary to +# have one join= line. +# Maybe Andreas thought that more is better, but all is too much? +# Below is Andreas' original code. -Troy +# localFile = open('add2dbconf', 'w') +# before = 'join=' +# after = ':29015' +# localFile.write('## The host:port of a node that rethinkdb will connect to\n') +# for entry in range(0,int(len(publist)/2)): +# localFile.write(before + publist[entry] + after + '\n') - -# printout hostlist -print ("hosts_dev = ", publist) +# Create a file named hostlist.py, overwriting one if it already exists +with open('hostlist.py', 'w') as f: + f.write('hosts_dev = {}'.format(publist)) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 059b576d..88b00166 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -64,13 +64,15 @@ python wait_until_all_running.py --tag $TAG # in case of elastic ips... 
python get_elastic_ips.py --tag $TAG -# generate hostlist.py and add_keys.sh -python3 create_hostlist.py --tag $TAG > hostlist.py -# make add_keys executable and execute +# Create three files: +# add2known_hosts.sh, add2dbconf and hostlist.py +python create_hostlist.py --tag $TAG + +# Make add2known_hosts.sh executable and execute it chmod +x add2known_hosts.sh ./add2known_hosts.sh -# resetting the rethinkdb initfile and adding the nodes to join... +# Reset the RethinkDB configuration file and add the nodes to join cp conf/bigchaindb.conf.template conf/bigchaindb.conf cat add2dbconf >> conf/bigchaindb.conf From 4d7c578adbbe7895025ed8b426037a9f678a1a02 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 14:56:32 +0200 Subject: [PATCH 21/53] Added some help comments to startup.sh --- deploy-cluster-aws/startup.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 88b00166..9a62e7eb 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -87,8 +87,12 @@ fab install_rethinkdb fab install_bigchaindb # generate genesisblock +# HORST is the last public_dns_name listed in conf/bigchaindb.conf +# For example: +# ec2-52-58-86-145.eu-central-1.compute.amazonaws.com HORST=`tail -1 conf/bigchaindb.conf|cut -d: -f1|cut -d= -f2` fab -H $HORST -f fab_prepare_chain.py init_bigchaindb + # initiate sharding fab start_bigchaindb_nodes From ff68501dc210c59e416cf238b7a1c3798f6d8f12 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 15:37:46 +0200 Subject: [PATCH 22/53] Removed check for awscli because we don't use it anymore --- deploy-cluster-aws/startup.sh | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 9a62e7eb..027953b8 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -23,22 +23,15 @@ fi TAG=$1 NODES=$2 -AWS=`which aws` FAB=`which fab` 
-DEPLOYDIR=`pwd` + # It seems BIGCHAINDIR was never used, but I wasn't sure -# so I just commented-out the following two lines. -Troy +# so I just commented-out the following lines. -Troy +#DEPLOYDIR=`pwd` #BIGCHAINDIR=`dirname $DEPLOYDIR` #export BIGCHAINDIR -# check if awscli is installed -if [ ! -f "$AWS" ] - then - echo "awscli is not installed!!!" - exit 1 -fi - -# checck if python-fabric is installed +# check if python-fabric is installed if [ ! -f "$FAB" ] then echo "python-fabric is not installed!!!" From 8949b5d8c6b44ff23b5fd1341d6176c8be9ece34 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 17:58:51 +0200 Subject: [PATCH 23/53] Minor code cleanup in startup.sh --- deploy-cluster-aws/startup.sh | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 027953b8..d526769e 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -31,30 +31,32 @@ FAB=`which fab` #BIGCHAINDIR=`dirname $DEPLOYDIR` #export BIGCHAINDIR -# check if python-fabric is installed +# Check if python-fabric is installed if [ ! -f "$FAB" ] then - echo "python-fabric is not installed!!!" + echo "python-fabric is not installed" exit 1 fi -# checking pem-file and changing access rights +# Check for AWS private key file pem-file and changing access rights if [ ! -f "pem/bigchaindb.pem" ] then echo "File pem/bigchaindb.pem (AWS private key) is missing" exit 1 fi -# 0400 for pem/bigchaindb.pem + +# Change the file permissions on pem/bigchaindb.pem +# so that the owner can read it, but that's all chmod 0400 pem/bigchaindb.pem -# Start the specified number of nodes +# Start the specified number of nodes on Amazon EC2 # and tag them with the specified tag python run_and_tag.py --tag $TAG --nodes $NODES # Wait until all those instances are running python wait_until_all_running.py --tag $TAG -# in case of elastic ips... 
+# Allocate elastic IP addresses and assign them to the instances python get_elastic_ips.py --tag $TAG # Create three files: @@ -79,7 +81,7 @@ fab install_rethinkdb # rollout bigchaindb fab install_bigchaindb -# generate genesisblock +# generate genesis block # HORST is the last public_dns_name listed in conf/bigchaindb.conf # For example: # ec2-52-58-86-145.eu-central-1.compute.amazonaws.com @@ -89,7 +91,7 @@ fab -H $HORST -f fab_prepare_chain.py init_bigchaindb # initiate sharding fab start_bigchaindb_nodes -# now cleanup! +# cleanup rm add2known_hosts.sh add2dbconf -# DONE! +# DONE From 24ec852e776fbdc7154331619787d453ee22335e Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 18:30:53 +0200 Subject: [PATCH 24/53] Simplify use of argparse in run_and_tag.py --- deploy-cluster-aws/run_and_tag.py | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py index c1eab98e..10cde20f 100644 --- a/deploy-cluster-aws/run_and_tag.py +++ b/deploy-cluster-aws/run_and_tag.py @@ -1,4 +1,7 @@ # -*- coding: utf-8 -*- +"""Launch the specified number of instances on Amazon EC2 +and tag them with the specified tag. 
+""" from __future__ import unicode_literals import boto3 @@ -12,25 +15,16 @@ AWS_REGION = os.environ['AWS_REGION'] parser = argparse.ArgumentParser() parser.add_argument("--tag", - help="tag to add to all launched instances on AWS") + help="tag to add to all launched instances on AWS", + required=True) parser.add_argument("--nodes", - help="number of nodes in the cluster") + help="number of nodes in the cluster", + required=True, + type=int) args = parser.parse_args() -if args.tag: - tag = args.tag -else: - # reading credentials from config for remote connection - print('usage: python run_and_tag.py --tag --nodes ') - print('reason: tag missing!!!') - exit(1) - -if args.nodes: - num_nodes = int(args.nodes) -else: - print('usage: python run_and_tag.py --tag --nodes ') - print('reason: nodes missing!!!') - exit(1) +tag = args.tag +num_nodes = int(args.nodes) # Connect to Amazon EC2 ec2 = boto3.resource(service_name='ec2', @@ -39,7 +33,8 @@ ec2 = boto3.resource(service_name='ec2', aws_secret_access_key=AWS_SECRET_ACCESS_KEY) -print('Starting and tagging instances on Amazon EC2...') +print('Starting {} instances on Amazon EC2 and tagging them with {}...'. 
+ format(num_nodes, tag)) for _ in range(num_nodes): # Request the launch of one instance at a time From 1852192c75365b4286af357aa36de3fd01cd00c1 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 29 Mar 2016 18:52:57 +0200 Subject: [PATCH 25/53] Use require=True in create_hostlist.py --- deploy-cluster-aws/create_hostlist.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/deploy-cluster-aws/create_hostlist.py b/deploy-cluster-aws/create_hostlist.py index 0f5253a3..4610c750 100644 --- a/deploy-cluster-aws/create_hostlist.py +++ b/deploy-cluster-aws/create_hostlist.py @@ -10,16 +10,12 @@ AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] AWS_REGION = os.environ['AWS_REGION'] parser = argparse.ArgumentParser() -parser.add_argument("--tag", help="tag instances in aws") +parser.add_argument("--tag", + help="tag instances in aws", + required=True) args = parser.parse_args() -if args.tag: - tag = args.tag -else: - # reading credentials from config for remote connection - print('usage: python create_hostlist.py --tag ') - print('reason: tag missing!!!') - exit(1) +tag = args.tag # Connect to Amazon EC2 ec2 = boto3.resource(service_name='ec2', From 03c2d0f1f2b5ed585bc0d32ef686fefeddc8f84a Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 30 Mar 2016 10:16:02 +0200 Subject: [PATCH 26/53] AWS deploy: merge a bunch of py scripts into one --- deploy-cluster-aws/create_hostlist.py | 67 --------- deploy-cluster-aws/get_elastic_ips.py | 65 --------- deploy-cluster-aws/launch_ec2_nodes.py | 145 +++++++++++++++++++ deploy-cluster-aws/run_and_tag.py | 57 -------- deploy-cluster-aws/startup.sh | 24 ++- deploy-cluster-aws/wait_until_all_running.py | 46 ------ 6 files changed, 156 insertions(+), 248 deletions(-) delete mode 100644 deploy-cluster-aws/create_hostlist.py delete mode 100644 deploy-cluster-aws/get_elastic_ips.py create mode 100644 deploy-cluster-aws/launch_ec2_nodes.py delete mode 100644 deploy-cluster-aws/run_and_tag.py delete mode 
100644 deploy-cluster-aws/wait_until_all_running.py diff --git a/deploy-cluster-aws/create_hostlist.py b/deploy-cluster-aws/create_hostlist.py deleted file mode 100644 index 4610c750..00000000 --- a/deploy-cluster-aws/create_hostlist.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- - -# from __future__ import unicode_literals -import argparse -import boto3 -import os - -AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] -AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] -AWS_REGION = os.environ['AWS_REGION'] - -parser = argparse.ArgumentParser() -parser.add_argument("--tag", - help="tag instances in aws", - required=True) -args = parser.parse_args() - -tag = args.tag - -# Connect to Amazon EC2 -ec2 = boto3.resource(service_name='ec2', - region_name=AWS_REGION, - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - -# Get a list of all instances with the specified tag. -# (Technically, instances_with_tag is an ec2.instancesCollection.) -instances_with_tag = ec2.instances.filter( - Filters=[{'Name': 'tag:Name', 'Values': [tag]}] - ) - -publist = [] -for instance in instances_with_tag: - public_dns_name = getattr(instance, 'public_dns_name', None) - if public_dns_name is not None: - publist.append(public_dns_name) - -# Create shellscript add2known_hosts.sh for adding remote keys to known_hosts -with open('add2known_hosts.sh', 'w') as f: - f.write('#! 
/bin/bash\n') - for public_dns_name in publist: - f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n') - -# Create a file named add2dbconf, overwriting one if it already exists -with open('add2dbconf', 'w') as f: - f.write('## The host:port of a node that RethinkDB will connect to\n') - for public_dns_name in publist: - f.write('join=' + public_dns_name + ':29015\n') - -# Note: The original code by Andreas wrote a file with lines of the form -# join=public_dns_name_0:29015 -# join=public_dns_name_1:29015 -# but it stopped about halfway through the list of public_dns_names -# (publist). In principle, it's only strictly necessary to -# have one join= line. -# Maybe Andreas thought that more is better, but all is too much? -# Below is Andreas' original code. -Troy -# localFile = open('add2dbconf', 'w') -# before = 'join=' -# after = ':29015' -# localFile.write('## The host:port of a node that rethinkdb will connect to\n') -# for entry in range(0,int(len(publist)/2)): -# localFile.write(before + publist[entry] + after + '\n') - -# Create a file named hostlist.py, overwriting one if it already exists -with open('hostlist.py', 'w') as f: - f.write('hosts_dev = {}'.format(publist)) diff --git a/deploy-cluster-aws/get_elastic_ips.py b/deploy-cluster-aws/get_elastic_ips.py deleted file mode 100644 index 9976eb27..00000000 --- a/deploy-cluster-aws/get_elastic_ips.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- - -""" Create Elastic IPs and assign them to instances if needed. 
-""" - -from __future__ import unicode_literals -import os -import boto3 -import argparse -import time - -AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] -AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] -AWS_REGION = os.environ['AWS_REGION'] - -parser = argparse.ArgumentParser() -parser.add_argument("--tag", help="tag instances in aws") -args = parser.parse_args() - -if args.tag: - tag = args.tag -else: - # reading credentials from config for remote connection - print('usage: python get_elastic_ips.py --tag ') - print('reason: tag missing!!!') - exit(1) - -# Connect to Amazon EC2 -ec2 = boto3.resource(service_name='ec2', - region_name=AWS_REGION, - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - - -# Get a list of all instances with the specified tag. -# (Technically, instances_with_tag is an ec2.instancesCollection.) -instances_with_tag = ec2.instances.filter( - Filters=[{'Name': 'tag:Name', 'Values': [tag]}] - ) - -print('Allocating elastic IP addresses and assigning them to the instances...') - -for instance in instances_with_tag: - # Create a client from the ec2 resource - # See http://boto3.readthedocs.org/en/latest/guide/clients.html - client = ec2.meta.client - - # Acquire an Elastic IP address - # response is a dict. See http://tinyurl.com/z2n7u9k - response = client.allocate_address(DryRun=False, Domain='standard') - public_ip = response['PublicIp'] - print('public_ip = {}'.format(public_ip)) - - # Associate that Elastic IP address with an instance - response2 = client.associate_address( - DryRun=False, - InstanceId=instance.instance_id, - PublicIp=public_ip - ) - print('was associated with the instance with id {}'. - format(instance.instance_id)) - -# Make sure all IP addresses are assigned... 
-print('Waiting 30 seconds to make sure all IP addresses are assigned...') -time.sleep(30) diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py new file mode 100644 index 00000000..5412a037 --- /dev/null +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +"""This script: +1. Launches the specified number of nodes (instances) on Amazon EC2, +2. tags them with the specified tag, +3. waits until those instances exist and are running, +4. for each instance, allocates an elastic IP address + and associates it with that instance, and +5. creates three files: + * add2known_hosts.sh + * add2dbconf + * hostlist.py +""" + +from __future__ import unicode_literals +import os +import time +import argparse +import boto3 + +AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] +AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] +AWS_REGION = os.environ['AWS_REGION'] + +parser = argparse.ArgumentParser() +parser.add_argument("--tag", + help="tag to add to all launched instances on AWS", + required=True) +parser.add_argument("--nodes", + help="number of nodes in the cluster", + required=True, + type=int) +args = parser.parse_args() + +tag = args.tag +num_nodes = int(args.nodes) + +# Connect to Amazon EC2 +ec2 = boto3.resource(service_name='ec2', + region_name=AWS_REGION, + aws_access_key_id=AWS_ACCESS_KEY_ID, + aws_secret_access_key=AWS_SECRET_ACCESS_KEY) + +print('Commencing launch of {} instances on Amazon EC2...'. 
+ format(num_nodes)) + +for _ in range(num_nodes): + # Request the launch of one instance at a time + # (so list_of_instances should contain only one item) + list_of_instances = ec2.create_instances( + ImageId='ami-accff2b1', # ubuntu-image + # 'ami-596b7235', # ubuntu w/ iops storage + MinCount=1, + MaxCount=1, + KeyName='bigchaindb', + InstanceType='m3.2xlarge', + # 'c3.8xlarge', + # 'c4.8xlarge', + SecurityGroupIds=['bigchaindb'] + ) + + # Tag the just-launched instances (should be just one) + for instance in list_of_instances: + time.sleep(5) + instance.create_tags(Tags=[{'Key': 'Name', 'Value': tag}]) + +# Get a list of all instances with the specified tag. +# (Technically, instances_with_tag is an ec2.instancesCollection.) +filters = [{'Name': 'tag:Name', 'Values': [tag]}] +instances_with_tag = ec2.instances.filter(Filters=filters) +print('The launched instances will have these ids:'.format(tag)) +for instance in instances_with_tag: + print(instance.id) + +print('Waiting until all those instances exist...') +for instance in instances_with_tag: + instance.wait_until_exists() + +print('Waiting until all those instances are running...') +for instance in instances_with_tag: + instance.wait_until_running() + +print('Allocating elastic IP addresses and assigning them to the instances...') + +for instance in instances_with_tag: + # Create a client from the ec2 resource + # See http://boto3.readthedocs.org/en/latest/guide/clients.html + client = ec2.meta.client + + # Acquire an Elastic IP address + # response is a dict. See http://tinyurl.com/z2n7u9k + response = client.allocate_address(DryRun=False, Domain='standard') + public_ip = response['PublicIp'] + print('The public IP address {}'.format(public_ip)) + + # Associate that Elastic IP address with an instance + response2 = client.associate_address( + DryRun=False, + InstanceId=instance.instance_id, + PublicIp=public_ip + ) + print('was associated with the instance with id {}'. 
+ format(instance.instance_id)) + +wait_time = 45 +print('Waiting {} seconds to make sure all instances are ready...'. + format(wait_time)) +time.sleep(wait_time) + +# Get a list of the pubic DNS names of the instances_with_tag +publist = [] +for instance in instances_with_tag: + public_dns_name = getattr(instance, 'public_dns_name', None) + if public_dns_name is not None: + publist.append(public_dns_name) + +# Create shellscript add2known_hosts.sh for adding remote keys to known_hosts +with open('add2known_hosts.sh', 'w') as f: + f.write('#! /bin/bash\n') + for public_dns_name in publist: + f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n') + +# Create a file named add2dbconf, overwriting one if it already exists +with open('add2dbconf', 'w') as f: + f.write('## The host:port of a node that RethinkDB will connect to\n') + for public_dns_name in publist: + f.write('join=' + public_dns_name + ':29015\n') + +# Note: The original code by Andreas wrote a file with lines of the form +# join=public_dns_name_0:29015 +# join=public_dns_name_1:29015 +# but it stopped about halfway through the list of public_dns_names +# (publist). In principle, it's only strictly necessary to +# have one join= line. +# Maybe Andreas thought that more is better, but all is too much? +# Below is Andreas' original code. 
-Troy +# localFile = open('add2dbconf', 'w') +# before = 'join=' +# after = ':29015' +# localFile.write('## The host:port of a node that rethinkdb will connect to\n') +# for entry in range(0,int(len(publist)/2)): +# localFile.write(before + publist[entry] + after + '\n') + +# Create a file named hostlist.py, overwriting one if it already exists +with open('hostlist.py', 'w') as f: + f.write('hosts_dev = {}'.format(publist)) diff --git a/deploy-cluster-aws/run_and_tag.py b/deploy-cluster-aws/run_and_tag.py deleted file mode 100644 index 10cde20f..00000000 --- a/deploy-cluster-aws/run_and_tag.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -"""Launch the specified number of instances on Amazon EC2 -and tag them with the specified tag. -""" - -from __future__ import unicode_literals -import boto3 -import time -import argparse -import os - -AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] -AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] -AWS_REGION = os.environ['AWS_REGION'] - -parser = argparse.ArgumentParser() -parser.add_argument("--tag", - help="tag to add to all launched instances on AWS", - required=True) -parser.add_argument("--nodes", - help="number of nodes in the cluster", - required=True, - type=int) -args = parser.parse_args() - -tag = args.tag -num_nodes = int(args.nodes) - -# Connect to Amazon EC2 -ec2 = boto3.resource(service_name='ec2', - region_name=AWS_REGION, - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - - -print('Starting {} instances on Amazon EC2 and tagging them with {}...'. 
- format(num_nodes, tag)) - -for _ in range(num_nodes): - # Request the launch of one instance at a time - # (so list_of_instances should contain only one item) - list_of_instances = ec2.create_instances( - ImageId='ami-accff2b1', # ubuntu-image - # 'ami-596b7235', # ubuntu w/ iops storage - MinCount=1, - MaxCount=1, - KeyName='bigchaindb', - InstanceType='m3.2xlarge', - # 'c3.8xlarge', - # 'c4.8xlarge', - SecurityGroupIds=['bigchaindb'] - ) - - # Tag the just-launched instances (should be just one) - for instance in list_of_instances: - time.sleep(5) - instance.create_tags(Tags=[{'Key': 'Name', 'Value': tag}]) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index d526769e..3d97a4c1 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -49,19 +49,17 @@ fi # so that the owner can read it, but that's all chmod 0400 pem/bigchaindb.pem -# Start the specified number of nodes on Amazon EC2 -# and tag them with the specified tag -python run_and_tag.py --tag $TAG --nodes $NODES - -# Wait until all those instances are running -python wait_until_all_running.py --tag $TAG - -# Allocate elastic IP addresses and assign them to the instances -python get_elastic_ips.py --tag $TAG - -# Create three files: -# add2known_hosts.sh, add2dbconf and hostlist.py -python create_hostlist.py --tag $TAG +# The following Python script does these things: +# 1. Launches the specified number of nodes (instances) on Amazon EC2, +# 2. tags them with the specified tag, +# 3. waits until those instances exist and are running, +# 4. for each instance, allocates an elastic IP address +# and associates it with that instance, and +# 5. 
creates three files: +# * add2known_hosts.sh +# * add2dbconf +# * hostlist.py +python launch_ec2_nodes.py --tag $TAG --nodes $NODES # Make add2known_hosts.sh executable and execute it chmod +x add2known_hosts.sh diff --git a/deploy-cluster-aws/wait_until_all_running.py b/deploy-cluster-aws/wait_until_all_running.py deleted file mode 100644 index 732ce617..00000000 --- a/deploy-cluster-aws/wait_until_all_running.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals -import os -import boto3 -import argparse - - -AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] -AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] -AWS_REGION = os.environ['AWS_REGION'] - -parser = argparse.ArgumentParser() -parser.add_argument("--tag", help="tag instances in aws") -args = parser.parse_args() - -if args.tag: - tag = args.tag -else: - # reading credentials from config for remote connection - print('usage: python get_instance_status.py --tag ') - print('reason: tag missing!!!') - exit(1) - -# Connect to Amazon EC2 -ec2 = boto3.resource(service_name='ec2', - region_name=AWS_REGION, - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) - -# Get a list of all instances with the specified tag. -# (Technically, instances_with_tag is an ec2.instancesCollection.) 
-instances_with_tag = ec2.instances.filter( - Filters=[{'Name': 'tag:Name', 'Values': [tag]}] - ) -print('The instances with tag {} have these ids:'.format(tag)) -for instance in instances_with_tag: - print(instance.id) - -print('Waiting until all those instances exist...') -for instance in instances_with_tag: - instance.wait_until_exists() - -print('Waiting until all those instances are running...') -for instance in instances_with_tag: - instance.wait_until_running() From 579efa3e71cd928eacf71eb54289f9b74d6e1750 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 30 Mar 2016 11:21:58 +0200 Subject: [PATCH 27/53] Remove import unicode_literals from Fabric scripts --- deploy-cluster-aws/fab_prepare_chain.py | 2 +- deploy-cluster-aws/fabfile.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy-cluster-aws/fab_prepare_chain.py b/deploy-cluster-aws/fab_prepare_chain.py index caa827ad..f67c38e3 100644 --- a/deploy-cluster-aws/fab_prepare_chain.py +++ b/deploy-cluster-aws/fab_prepare_chain.py @@ -3,7 +3,7 @@ """ Generating genesis block """ -from __future__ import with_statement, unicode_literals +from __future__ import with_statement from fabric import colors as c from fabric.api import * diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index dff3ab75..ed51680b 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -4,7 +4,7 @@ bigchaindb, including its storage backend. 
""" -from __future__ import with_statement, unicode_literals +from __future__ import with_statement import requests from time import * From 7e2dac5766d2bf9a803fa967d1d1ea2b84639602 Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 30 Mar 2016 17:10:47 +0200 Subject: [PATCH 28/53] Minor formatting in fabfile.py --- deploy-cluster-aws/fabfile.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index ed51680b..7079ad0c 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -36,10 +36,10 @@ env.roledefs = { } env.roles = ["role1"] env.user = 'ubuntu' -env.key_filename = 'pem/bigchaindb.pem' +env.key_filename = ['pem/bigchaindb.pem', ] -################################################################################ +###################################################################### # base softwarestack rollout @task @@ -105,14 +105,14 @@ def install_bigchaindb(): @task @parallel def start_bigchaindb_nodes(): - sudo('screen -d -m bigchaindb -y start &', pty = False) + sudo('screen -d -m bigchaindb -y start &', pty=False) @task def install_newrelic(): with settings(warn_only=True): sudo('echo deb http://apt.newrelic.com/debian/ newrelic non-free >> /etc/apt/sources.list') - #sudo('apt-key adv --keyserver hkp://subkeys.pgp.net --recv-keys 548C16BF') + # sudo('apt-key adv --keyserver hkp://subkeys.pgp.net --recv-keys 548C16BF') sudo('apt-get update') sudo('apt-get -y --force-yes install newrelic-sysmond') sudo('nrsysmond-config --set license_key=c88af00c813983f8ee12e9b455aa13fde1cddaa8') From 62f953a6c7158dd5a76ef3721ee178eda9b166ae Mon Sep 17 00:00:00 2001 From: troymc Date: Wed, 30 Mar 2016 17:36:09 +0200 Subject: [PATCH 29/53] Put import unicode_literals back in Fabric pyfiles --- deploy-cluster-aws/fab_prepare_chain.py | 2 +- deploy-cluster-aws/fabfile.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/deploy-cluster-aws/fab_prepare_chain.py b/deploy-cluster-aws/fab_prepare_chain.py index f67c38e3..caa827ad 100644 --- a/deploy-cluster-aws/fab_prepare_chain.py +++ b/deploy-cluster-aws/fab_prepare_chain.py @@ -3,7 +3,7 @@ """ Generating genesis block """ -from __future__ import with_statement +from __future__ import with_statement, unicode_literals from fabric import colors as c from fabric.api import * diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 7079ad0c..1fc64184 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -4,7 +4,7 @@ bigchaindb, including its storage backend. """ -from __future__ import with_statement +from __future__ import with_statement, unicode_literals import requests from time import * @@ -36,7 +36,7 @@ env.roledefs = { } env.roles = ["role1"] env.user = 'ubuntu' -env.key_filename = ['pem/bigchaindb.pem', ] +env.key_filename = 'pem/bigchaindb.pem' ###################################################################### From af275f0e4966a3cf685f9819df7654c21e525a2d Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 31 Mar 2016 10:31:09 +0200 Subject: [PATCH 30/53] Better handling of AWS elastic IPs --- deploy-cluster-aws/launch_ec2_nodes.py | 89 +++++++++++++++++++++++--- 1 file changed, 79 insertions(+), 10 deletions(-) diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 5412a037..1fc35606 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -15,8 +15,33 @@ from __future__ import unicode_literals import os import time import argparse +import botocore import boto3 + +def get_naeips(client0): + """Get a list of non-associated elastic IP addresses (NAEIPs) on EC2. + + Args: + client0: A client created from an EC2 resource. + e.g. 
client0 = ec2.meta.client + See http://boto3.readthedocs.org/en/latest/guide/clients.html + + Returns: + A list of NAEIPs in the EC2 account associated with the client. + To interpret the contents, see http://tinyurl.com/hrnuy74 + """ + # response is a dict with 2 keys: Addresses and ResponseMetadata + # See http://tinyurl.com/hrnuy74 + response = client0.describe_addresses() + allocated_eips = response['Addresses'] + non_associated_eips = [] + for eip in allocated_eips: + if 'InstanceId' not in eip: + non_associated_eips.append(eip) + return non_associated_eips + + AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] AWS_REGION = os.environ['AWS_REGION'] @@ -34,12 +59,54 @@ args = parser.parse_args() tag = args.tag num_nodes = int(args.nodes) -# Connect to Amazon EC2 +# Get an AWS EC2 "resource" +# See http://boto3.readthedocs.org/en/latest/guide/resources.html ec2 = boto3.resource(service_name='ec2', region_name=AWS_REGION, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY) +# Create a client from the EC2 resource +# See http://boto3.readthedocs.org/en/latest/guide/clients.html +client = ec2.meta.client + +# Before launching any instances, make sure they have sufficient +# allocated-but-unassociated EC2-Classic elastic IP addresses +print('Checking if you have enough allocated-but-unassociated ' + + 'EC2-Classic elastic IP addresses...') + +non_associated_eips = get_naeips(client) + +print('You have {} allocated elactic IPs which are ' + 'not already associated with instances'. + format(len(non_associated_eips))) + +# Note that the allocated addresses may include +# EC2-Classic and EC2-VPC elastic IP addresses. +# For now, I will assume that doesn't matter. +# -Troy + +if num_nodes > len(non_associated_eips): + num_eips_to_allocate = num_nodes - len(non_associated_eips) + print('You want to launch {} instances'. 
+ format(num_nodes)) + print('so {} more elastic IPs must be allocated'. + format(num_eips_to_allocate)) + for _ in range(num_eips_to_allocate): + try: + # Allocate an elastic IP address + # response is a dict. See http://tinyurl.com/z2n7u9k + response = client.allocate_address(DryRun=False, Domain='standard') + except botocore.exceptions.ClientError: + print('Something went wrong when allocating an ' + 'EC2-Classic elastic IP address on EC2. ' + 'Maybe you are already at the maximum number allowed ' + 'by your AWS account? More details:') + raise + except: + print('Unexpected error:') + raise + print('Commencing launch of {} instances on Amazon EC2...'. format(num_nodes)) @@ -79,17 +146,19 @@ print('Waiting until all those instances are running...') for instance in instances_with_tag: instance.wait_until_running() -print('Allocating elastic IP addresses and assigning them to the instances...') +print('Associating allocated-but-unassociated elastic IPs ' + + 'with the instances...') -for instance in instances_with_tag: - # Create a client from the ec2 resource - # See http://boto3.readthedocs.org/en/latest/guide/clients.html - client = ec2.meta.client +# Get a list of elastic IPs which are allocated but +# not associated with any instances. +# There should be enough because we checked earlier and +# allocated more if necessary. +non_associated_eips_2 = get_naeips(client) - # Acquire an Elastic IP address - # response is a dict. 
See http://tinyurl.com/z2n7u9k - response = client.allocate_address(DryRun=False, Domain='standard') - public_ip = response['PublicIp'] +for i, instance in enumerate(instances_with_tag): + print('Grabbing an allocated but non-associated elastic IP...') + eip = non_associated_eips_2[i] + public_ip = eip['PublicIp'] print('The public IP address {}'.format(public_ip)) # Associate that Elastic IP address with an instance From a5a898cb60539430804cd1213b3ce02b54928b87 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 31 Mar 2016 10:57:05 +0200 Subject: [PATCH 31/53] Moved shared AWS constants & functions to awscommon --- deploy-cluster-aws/awscommon.py | 36 ++++++++++++++++++++++++++ deploy-cluster-aws/launch_ec2_nodes.py | 35 +++++-------------------- 2 files changed, 42 insertions(+), 29 deletions(-) create mode 100644 deploy-cluster-aws/awscommon.py diff --git a/deploy-cluster-aws/awscommon.py b/deploy-cluster-aws/awscommon.py new file mode 100644 index 00000000..5f180ccd --- /dev/null +++ b/deploy-cluster-aws/awscommon.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +"""Shared AWS-related global constants and functions. +""" + +from __future__ import unicode_literals +import os + + +# Global constants +AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] +AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] +AWS_REGION = os.environ['AWS_REGION'] + + +def get_naeips(client0): + """Get a list of (allocated) non-associated elastic IP addresses + (NAEIPs) on EC2. + + Args: + client0: A client created from an EC2 resource. + e.g. client0 = ec2.meta.client + See http://boto3.readthedocs.org/en/latest/guide/clients.html + + Returns: + A list of NAEIPs in the EC2 account associated with the client. 
+ To interpret the contents, see http://tinyurl.com/hrnuy74 + """ + # response is a dict with 2 keys: Addresses and ResponseMetadata + # See http://tinyurl.com/hrnuy74 + response = client0.describe_addresses() + allocated_eips = response['Addresses'] + non_associated_eips = [] + for eip in allocated_eips: + if 'InstanceId' not in eip: + non_associated_eips.append(eip) + return non_associated_eips diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 1fc35606..2c4fe34b 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -12,39 +12,16 @@ """ from __future__ import unicode_literals -import os import time import argparse import botocore import boto3 - - -def get_naeips(client0): - """Get a list of non-associated elastic IP addresses (NAEIPs) on EC2. - - Args: - client0: A client created from an EC2 resource. - e.g. client0 = ec2.meta.client - See http://boto3.readthedocs.org/en/latest/guide/clients.html - - Returns: - A list of NAEIPs in the EC2 account associated with the client. 
- To interpret the contents, see http://tinyurl.com/hrnuy74 - """ - # response is a dict with 2 keys: Addresses and ResponseMetadata - # See http://tinyurl.com/hrnuy74 - response = client0.describe_addresses() - allocated_eips = response['Addresses'] - non_associated_eips = [] - for eip in allocated_eips: - if 'InstanceId' not in eip: - non_associated_eips.append(eip) - return non_associated_eips - - -AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] -AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] -AWS_REGION = os.environ['AWS_REGION'] +from awscommon import ( + AWS_ACCESS_KEY_ID, + AWS_SECRET_ACCESS_KEY, + AWS_REGION, + get_naeips, +) parser = argparse.ArgumentParser() parser.add_argument("--tag", From 531a60e8aa8a0ba3d02fef5e4c9050b77a3541c4 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 31 Mar 2016 14:38:11 +0200 Subject: [PATCH 32/53] Refactor how bigchaindb.conf gets written --- deploy-cluster-aws/create_bigchaindb_conf.py | 43 ++++++++++++ deploy-cluster-aws/launch_ec2_nodes.py | 70 ++++++++------------ deploy-cluster-aws/startup.sh | 19 +++--- 3 files changed, 79 insertions(+), 53 deletions(-) create mode 100644 deploy-cluster-aws/create_bigchaindb_conf.py diff --git a/deploy-cluster-aws/create_bigchaindb_conf.py b/deploy-cluster-aws/create_bigchaindb_conf.py new file mode 100644 index 00000000..4ac5a73e --- /dev/null +++ b/deploy-cluster-aws/create_bigchaindb_conf.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +"""(Re)create the RethinkDB configuration file conf/bigchaindb.conf. +Start with conf/bigchaindb.conf.template +then append additional configuration settings (lines). 
+""" + +from __future__ import unicode_literals +import os +import os.path +import shutil +from hostlist import hosts_dev + +# cwd = current working directory +old_cwd = os.getcwd() +os.chdir('conf') +if os.path.isfile('bigchaindb.conf'): + os.remove('bigchaindb.conf') + +# Create the initial bigchaindb.conf using bigchaindb.conf.template +shutil.copy2('bigchaindb.conf.template', 'bigchaindb.conf') + +# Append additional lines to bigchaindb.conf +with open('bigchaindb.conf', 'a') as f: + f.write('## The host:port of a node that RethinkDB will connect to\n') + for public_dns_name in hosts_dev: + f.write('join=' + public_dns_name + ':29015\n') + +os.chdir(old_cwd) + +# Note: The original code by Andreas wrote a file with lines of the form +# join=public_dns_name_0:29015 +# join=public_dns_name_1:29015 +# but it stopped about halfway through the list of public_dns_names +# (publist). In principle, it's only strictly necessary to +# have one join= line. +# Maybe Andreas thought that more is better, but all is too much? +# Below is Andreas' original code. -Troy +# lfile = open('add2dbconf', 'w') +# before = 'join=' +# after = ':29015' +# lfile.write('## The host:port of a node that rethinkdb will connect to\n') +# for entry in range(0,int(len(publist)/2)): +# lfile.write(before + publist[entry] + after + '\n') diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 2c4fe34b..578093ff 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -4,11 +4,10 @@ 2. tags them with the specified tag, 3. waits until those instances exist and are running, 4. for each instance, allocates an elastic IP address - and associates it with that instance, and -5. creates three files: - * add2known_hosts.sh - * add2dbconf - * hostlist.py + and associates it with that instance, +5. writes the shellscript add2known_hosts.sh +6. 
(over)writes a file named hostlist.py + containing a list of all public DNS names. """ from __future__ import unicode_literals @@ -147,45 +146,30 @@ for i, instance in enumerate(instances_with_tag): print('was associated with the instance with id {}'. format(instance.instance_id)) +# Get a list of the pubic DNS names of the instances_with_tag +hosts_dev = [] +for instance in instances_with_tag: + public_dns_name = getattr(instance, 'public_dns_name', None) + if public_dns_name is not None: + hosts_dev.append(public_dns_name) + +# Write a shellscript to add remote keys to ~/.ssh/known_hosts +print('Preparing shellscript to add remote keys to known_hosts') +with open('add2known_hosts.sh', 'w') as f: + f.write('#!/bin/bash\n') + for public_dns_name in hosts_dev: + f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n') + +# Create a file named hostlist.py containing hosts_dev. +# If a hostlist.py already exists, it will be overwritten. +print('Writing hostlist.py') +with open('hostlist.py', 'w') as f: + f.write('# -*- coding: utf-8 -*-\n') + f.write('from __future__ import unicode_literals\n') + f.write('hosts_dev = {}\n'.format(hosts_dev)) + +# Wait wait_time = 45 print('Waiting {} seconds to make sure all instances are ready...'. format(wait_time)) time.sleep(wait_time) - -# Get a list of the pubic DNS names of the instances_with_tag -publist = [] -for instance in instances_with_tag: - public_dns_name = getattr(instance, 'public_dns_name', None) - if public_dns_name is not None: - publist.append(public_dns_name) - -# Create shellscript add2known_hosts.sh for adding remote keys to known_hosts -with open('add2known_hosts.sh', 'w') as f: - f.write('#! 
/bin/bash\n') - for public_dns_name in publist: - f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n') - -# Create a file named add2dbconf, overwriting one if it already exists -with open('add2dbconf', 'w') as f: - f.write('## The host:port of a node that RethinkDB will connect to\n') - for public_dns_name in publist: - f.write('join=' + public_dns_name + ':29015\n') - -# Note: The original code by Andreas wrote a file with lines of the form -# join=public_dns_name_0:29015 -# join=public_dns_name_1:29015 -# but it stopped about halfway through the list of public_dns_names -# (publist). In principle, it's only strictly necessary to -# have one join= line. -# Maybe Andreas thought that more is better, but all is too much? -# Below is Andreas' original code. -Troy -# localFile = open('add2dbconf', 'w') -# before = 'join=' -# after = ':29015' -# localFile.write('## The host:port of a node that rethinkdb will connect to\n') -# for entry in range(0,int(len(publist)/2)): -# localFile.write(before + publist[entry] + after + '\n') - -# Create a file named hostlist.py, overwriting one if it already exists -with open('hostlist.py', 'w') as f: - f.write('hosts_dev = {}'.format(publist)) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 3d97a4c1..3374052e 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -54,20 +54,19 @@ chmod 0400 pem/bigchaindb.pem # 2. tags them with the specified tag, # 3. waits until those instances exist and are running, # 4. for each instance, allocates an elastic IP address -# and associates it with that instance, and -# 5. creates three files: -# * add2known_hosts.sh -# * add2dbconf -# * hostlist.py +# and associates it with that instance, +# 5. writes the shellscript add2known_hosts.sh +# 6. (over)writes a file named hostlist.py +# containing a list of all public DNS names. 
python launch_ec2_nodes.py --tag $TAG --nodes $NODES -# Make add2known_hosts.sh executable and execute it +# Make add2known_hosts.sh executable then execute it. +# This adds remote keys to ~/.ssh/known_hosts chmod +x add2known_hosts.sh ./add2known_hosts.sh -# Reset the RethinkDB configuration file and add the nodes to join -cp conf/bigchaindb.conf.template conf/bigchaindb.conf -cat add2dbconf >> conf/bigchaindb.conf +# (Re)create the RethinkDB configuration file conf/bigchaindb.conf +python create_bigchaindb_conf.py # rollout base packages (dependencies) needed before # storage backend (rethinkdb) and bigchaindb can be rolled out @@ -90,6 +89,6 @@ fab -H $HORST -f fab_prepare_chain.py init_bigchaindb fab start_bigchaindb_nodes # cleanup -rm add2known_hosts.sh add2dbconf +rm add2known_hosts.sh # DONE From 0a890801c5d84ee67a93288ff0e6c507fd34365d Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 31 Mar 2016 15:13:42 +0200 Subject: [PATCH 33/53] Rename bigchaindb.conf to rethinkdb.conf (@ AWS deployment) --- .gitignore | 2 +- ...ndb.conf.template => rethinkdb.conf.template} | 0 ...gchaindb_conf.py => create_rethinkdb_conf.py} | 16 ++++++++-------- deploy-cluster-aws/fabfile.py | 2 +- deploy-cluster-aws/startup.sh | 8 ++++---- 5 files changed, 14 insertions(+), 14 deletions(-) rename deploy-cluster-aws/conf/{bigchaindb.conf.template => rethinkdb.conf.template} (100%) rename deploy-cluster-aws/{create_bigchaindb_conf.py => create_rethinkdb_conf.py} (74%) diff --git a/.gitignore b/.gitignore index ae5d85fa..d94467b1 100644 --- a/.gitignore +++ b/.gitignore @@ -69,5 +69,5 @@ target/ *.pem # Some files created when deploying a cluster on AWS -deploy-cluster-aws/conf/bigchaindb.conf +deploy-cluster-aws/conf/rethinkdb.conf deploy-cluster-aws/hostlist.py diff --git a/deploy-cluster-aws/conf/bigchaindb.conf.template b/deploy-cluster-aws/conf/rethinkdb.conf.template similarity index 100% rename from deploy-cluster-aws/conf/bigchaindb.conf.template rename to 
deploy-cluster-aws/conf/rethinkdb.conf.template diff --git a/deploy-cluster-aws/create_bigchaindb_conf.py b/deploy-cluster-aws/create_rethinkdb_conf.py similarity index 74% rename from deploy-cluster-aws/create_bigchaindb_conf.py rename to deploy-cluster-aws/create_rethinkdb_conf.py index 4ac5a73e..268dd710 100644 --- a/deploy-cluster-aws/create_bigchaindb_conf.py +++ b/deploy-cluster-aws/create_rethinkdb_conf.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -"""(Re)create the RethinkDB configuration file conf/bigchaindb.conf. -Start with conf/bigchaindb.conf.template +"""(Re)create the RethinkDB configuration file conf/rethinkdb.conf. +Start with conf/rethinkdb.conf.template then append additional configuration settings (lines). """ @@ -13,14 +13,14 @@ from hostlist import hosts_dev # cwd = current working directory old_cwd = os.getcwd() os.chdir('conf') -if os.path.isfile('bigchaindb.conf'): - os.remove('bigchaindb.conf') +if os.path.isfile('rethinkdb.conf'): + os.remove('rethinkdb.conf') -# Create the initial bigchaindb.conf using bigchaindb.conf.template -shutil.copy2('bigchaindb.conf.template', 'bigchaindb.conf') +# Create the initial rethinkdb.conf using rethinkdb.conf.template +shutil.copy2('rethinkdb.conf.template', 'rethinkdb.conf') -# Append additional lines to bigchaindb.conf -with open('bigchaindb.conf', 'a') as f: +# Append additional lines to rethinkdb.conf +with open('rethinkdb.conf', 'a') as f: f.write('## The host:port of a node that RethinkDB will connect to\n') for public_dns_name in hosts_dev: f.write('join=' + public_dns_name + ':29015\n') diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 1fc64184..705c1b37 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -86,7 +86,7 @@ def install_rethinkdb(): # change fs to user sudo('chown -R rethinkdb:rethinkdb /data') # copy config file to target system - put('conf/bigchaindb.conf', + put('conf/rethinkdb.conf', 
'/etc/rethinkdb/instances.d/instance1.conf', mode=0600, use_sudo=True) # initialize data-dir sudo('rm -rf /data/*') diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 3374052e..4d3b69c1 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -65,8 +65,8 @@ python launch_ec2_nodes.py --tag $TAG --nodes $NODES chmod +x add2known_hosts.sh ./add2known_hosts.sh -# (Re)create the RethinkDB configuration file conf/bigchaindb.conf -python create_bigchaindb_conf.py +# (Re)create the RethinkDB configuration file conf/rethinkdb.conf +python create_rethinkdb_conf.py # rollout base packages (dependencies) needed before # storage backend (rethinkdb) and bigchaindb can be rolled out @@ -79,10 +79,10 @@ fab install_rethinkdb fab install_bigchaindb # generate genesis block -# HORST is the last public_dns_name listed in conf/bigchaindb.conf +# HORST is the last public_dns_name listed in conf/rethinkdb.conf # For example: # ec2-52-58-86-145.eu-central-1.compute.amazonaws.com -HORST=`tail -1 conf/bigchaindb.conf|cut -d: -f1|cut -d= -f2` +HORST=`tail -1 conf/rethinkdb.conf|cut -d: -f1|cut -d= -f2` fab -H $HORST -f fab_prepare_chain.py init_bigchaindb # initiate sharding From 72c77cd638c1b9a052b9c8282826c9a2b3a88d1e Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 31 Mar 2016 17:56:20 +0200 Subject: [PATCH 34/53] Deleted old AWS deployment Readme.md --- deploy-cluster-aws/Readme.md | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 deploy-cluster-aws/Readme.md diff --git a/deploy-cluster-aws/Readme.md b/deploy-cluster-aws/Readme.md deleted file mode 100644 index a821cadd..00000000 --- a/deploy-cluster-aws/Readme.md +++ /dev/null @@ -1,20 +0,0 @@ -## Create and configure the storage backend in Amazon's Cloud - -#### Getting started -- Checkout bigchaindb and copy bigchain-deployment to bigchaindb repository - -#### Prerequesites - - Valid AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY is needed, both are 
exported as variables to the shell - - awscli - DO WE EVEN USE THIS? - - boto - DEPENDENCY BEING REMOVED - - boto3 (Python 2 or 3) - - fabric w/ fabtools (Python 2 only) - -#### Cluster Installation - - Got to the DEPLOY-directory and run './startup.sh' with two parameters (tag and number of nodes)...that's it! - e.g.: ./startup.sh bro 7 to install a cluster tagged as bro with seven nodes. - -#### If an error occurs... -There are some issues during the rollout on Amazon (presumably also in other cloud/virtual environments): if you tested with a high sequence it might be possible, that you run into: - - NetworkError: Host key for ec2-xx-xx-xx-xx.eu-central-1.compute.amazonaws.com did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack. -If so, just clean up your known_hosts file and start again. From 3866537597f6b60ace260ed7b7db582ecbb83985 Mon Sep 17 00:00:00 2001 From: troymc Date: Thu, 31 Mar 2016 19:08:37 +0200 Subject: [PATCH 35/53] 1st draft of AWS deployment docs --- deploy-cluster-aws/launch_ec2_nodes.py | 18 ++-- deploy-cluster-aws/startup.sh | 7 +- docs/source/deploy-on-aws.md | 130 +++++++++++++++++++++++++ 3 files changed, 141 insertions(+), 14 deletions(-) create mode 100644 docs/source/deploy-on-aws.md diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 578093ff..95d78750 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -1,10 +1,11 @@ # -*- coding: utf-8 -*- """This script: -1. Launches the specified number of nodes (instances) on Amazon EC2, +0. allocates more elastic IP addresses if necessary, +1. launches the specified number of nodes (instances) on Amazon EC2, 2. tags them with the specified tag, 3. waits until those instances exist and are running, -4. for each instance, allocates an elastic IP address - and associates it with that instance, +4. 
for each instance, it associates an elastic IP address + with that instance, 5. writes the shellscript add2known_hosts.sh 6. (over)writes a file named hostlist.py containing a list of all public DNS names. @@ -47,9 +48,9 @@ ec2 = boto3.resource(service_name='ec2', client = ec2.meta.client # Before launching any instances, make sure they have sufficient -# allocated-but-unassociated EC2-Classic elastic IP addresses +# allocated-but-unassociated EC2 elastic IP addresses print('Checking if you have enough allocated-but-unassociated ' + - 'EC2-Classic elastic IP addresses...') + 'EC2 elastic IP addresses...') non_associated_eips = get_naeips(client) @@ -57,11 +58,6 @@ print('You have {} allocated elactic IPs which are ' 'not already associated with instances'. format(len(non_associated_eips))) -# Note that the allocated addresses may include -# EC2-Classic and EC2-VPC elastic IP addresses. -# For now, I will assume that doesn't matter. -# -Troy - if num_nodes > len(non_associated_eips): num_eips_to_allocate = num_nodes - len(non_associated_eips) print('You want to launch {} instances'. @@ -75,7 +71,7 @@ if num_nodes > len(non_associated_eips): response = client.allocate_address(DryRun=False, Domain='standard') except botocore.exceptions.ClientError: print('Something went wrong when allocating an ' - 'EC2-Classic elastic IP address on EC2. ' + 'EC2 elastic IP address on EC2. ' 'Maybe you are already at the maximum number allowed ' 'by your AWS account? More details:') raise diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 4d3b69c1..5c8de655 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -50,11 +50,12 @@ fi chmod 0400 pem/bigchaindb.pem # The following Python script does these things: -# 1. Launches the specified number of nodes (instances) on Amazon EC2, +# 0. allocates more elastic IP addresses if necessary, +# 1. launches the specified number of nodes (instances) on Amazon EC2, # 2. 
tags them with the specified tag, # 3. waits until those instances exist and are running, -# 4. for each instance, allocates an elastic IP address -# and associates it with that instance, +# 4. for each instance, it associates an elastic IP address +# with that instance, # 5. writes the shellscript add2known_hosts.sh # 6. (over)writes a file named hostlist.py # containing a list of all public DNS names. diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md new file mode 100644 index 00000000..656fe651 --- /dev/null +++ b/docs/source/deploy-on-aws.md @@ -0,0 +1,130 @@ +# Deploy a Cluster on AWS + +This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS). We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances. + +## Why? + +You might ask why one would want to deploy a centrally-controlled BigchainDB cluster. Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization? + +That's true, but there are some reasons why one might want a centrally-controlled cluster: 1) for testing, and 2) for initial deployment. Afterwards, the control of each node can be handed over to a different entity. + +## Python Setup + +The instructions that follow have been tested on Ubuntu 14.04, but may also work on similar distros or operating systems. + +Our Python scripts for deploying to AWS use Python 2, so maybe create a Python 2 virtual environment and activate it. Then install the following Python packages (in that virtual environment): +```text +pip install fabric +pip install fabtools +pip install requests +pip install boto3 +``` + +What did you just install? 
+ +* "[Fabric](http://www.fabfile.org/) is a Python (2.5-2.7) library and command-line tool for streamlining the use of SSH for application deployment or systems administration tasks." +* [fabtools](https://github.com/ronnix/fabtools) are "tools for writing awesome Fabric files" +* [requests](http://docs.python-requests.org/en/master/) is a Python package/library for sending HTTP requests +* "[Boto](https://boto3.readthedocs.org/en/latest/) is the Amazon Web Services (AWS) SDK for Python, which allows Python developers to write software that makes use of Amazon services like S3 and EC2." (`boto3` is the name of the latest Boto package.) + +Note: You _don't_ need to install `awscli` (AWS Command-Line Interface tools) but you can if you like. + +## AWS Setup + +Before you can deploy a BigchainDB cluster on AWS, you must have an AWS account. If you don't already have one, you can [sign up for one for free](https://aws.amazon.com/). + +### Create an AWS Access Key + +The next thing you'll need is an AWS access key. If you don't have one, you can create one using the [instructions in the AWS documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html). You should get an access key ID (e.g. AKIAIOSFODNN7EXAMPLE) and a secret access key (e.g. wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). + +Our AWS deployment scripts read the AWS access key information from environment variables. One way to set the appropriate environment variables is to edit your `~/.bashrc` file (or similar) by adding the lines: +```text +export AWS_ACCESS_KEY_ID=[[insert AWS access key here, with no brackets]] +export AWS_SECRET_ACCESS_KEY=[[insert AWS secret access key here, with no brackets]] +export AWS_REGION=eu-central-1 +``` + +You can change the `AWS_REGION` to a different one if you like. (It's where the cluster will be deployed.) The AWS documentation has [a list of them](http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). 
+ +You can force your terminal to re-read `~/.bashrc` by using +```text +source ~/.bashrc +``` + +or by opening a new terminal session. + +### Get Enough Amazon Elastic IP Addresses + +Our AWS deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html). + +### Create an Amazon EC2 Key Pair + +Go to the AWS EC2 Console and select "Key Pairs" in the left sidebar. Click the "Create Key Pair" button. Give it the name `bigchaindb`. You should be prompted to save a file named `bigchaindb.pem`. That file contains the RSA private key. (Amazon keeps the corresponding public key.) Save the file in `bigchaindb/deploy-cluster-aws/pem/bigchaindb.pem`. + +You should not share your private key. + +### Create an Amazon EC2 Security Group + +Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. Give it the name `bigchaindb`. The description probably doesn't matter but we also put `bigchaindb` for that. + +Add some rules for Inbound traffic: + +* Type = All TCP, Protocol = TCP, Port Range = 0-65535, Source = 0.0.0.0/0 +* Type = SSH, Protocol = SSH, Port Range = 22, Source = 0.0.0.0/0 +* Type = All UDP, Protocol = UDP, Port Range = 0-65535, Source = 0.0.0.0/0 +* Type = All ICMP, Protocol = ICMP, Port Range = 0-65535, Source = 0.0.0.0/0 + +**Note: These rules are extremely lax! They're meant to make testing easy.** You'll want to tighten them up if you intend to have a secure cluster. For example, Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address." 
+ + +## Deployment + +Here's an example of how one could launch a BigchainDB cluster of 4 nodes tagged `wrigley` on AWS: +```text +cd bigchaindb +cd deploy-cluster-aws +./startup.sh wrigley 4 +``` + +`startup.sh` is a Bash script which calls some Python 2 and Fabric scripts. Here's what it does: + +0. allocates more elastic IP addresses if necessary, +1. launches the specified number of nodes (instances) on Amazon EC2, +2. tags them with the specified tag, +3. waits until those instances exist and are running, +4. for each instance, it associates an elastic IP address with that instance, +5. adds remote keys to `~/.ssh/known_hosts`, +6. (re)creates the RethinkDB configuration file `conf/rethinkdb.conf`, +7. installs base (prerequisite) software on all instances, +8. installs RethinkDB on all instances, +9. installs BigchainDB on all instances, +10. generates the genesis block, +11. starts BigchainDB on all instances. + +It should take a few minutes for the deployment to finish. Once it's finished, you can login to your AWS EC2 Console (on the web) to see the instances just launched. + +There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that from the AWS EC2 Console. + +The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them from the AWS EC2 Console. + +## Known Issues + +### NetworkError + +If you tested with a high sequence it might be possible that you run into an error message like this: +```text +NetworkError: Host key for ec2-xx-xx-xx-xx.eu-central-1.compute.amazonaws.com +did not match pre-existing key! Server's key was changed recently, or possible +man-in-the-middle attack. +``` + +If so, just clean up your `known_hosts` file and start again. 
For example, you might copy your current `known_hosts` file to `old_known_hosts` like so: +```text +mv ~/.ssh/known_hosts ~/.ssh/old_known_hosts +``` + +Then terminate your instances and try deploying again with a different tag. + +### Failure when Installing Base Software + +If you get an error with installing the base software on the instances, then just terminate your instances and try deploying again with a different tag. From 1b18ceabe60f6cffff251c47f44f9596d8435395 Mon Sep 17 00:00:00 2001 From: troymc Date: Fri, 1 Apr 2016 09:02:44 +0200 Subject: [PATCH 36/53] Added deploy-on-aws to /docs/index.rst --- docs/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/index.rst b/docs/source/index.rst index ac7b3be9..8a75833f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -20,6 +20,7 @@ Table of Contents http-client-server-api python-driver-api-examples local-rethinkdb-cluster + deploy-on-aws cryptography models json-serialization From b1b25a2986436db23684f10e1cabfe5fb93ee4ae Mon Sep 17 00:00:00 2001 From: troymc Date: Fri, 1 Apr 2016 09:05:58 +0200 Subject: [PATCH 37/53] Noted AWS deployment scripts in CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca635f73..291e89fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ Tag name: TBD committed: TBD ### Added +- AWS deployment scripts: [Issue #151](https://github.com/bigchaindb/bigchaindb/issues/151) - `CHANGELOG.md` (this file) - Multisig support: [Pull Request #107](https://github.com/bigchaindb/bigchaindb/pull/107) - API/Wire protocol (RESTful HTTP API): [Pull Request #102](https://github.com/bigchaindb/bigchaindb/pull/102) From 86ac6b24834d476ea55746d66093e943fec70c10 Mon Sep 17 00:00:00 2001 From: troymc Date: Fri, 1 Apr 2016 09:54:51 +0200 Subject: [PATCH 38/53] Added note to docs re/ using a cluster deployed on AWS today --- docs/source/deploy-on-aws.md | 2 ++ 1 file changed, 2 insertions(+) 
diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 656fe651..8a4646b6 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -2,6 +2,8 @@ This section explains a way to deploy a cluster of BigchainDB nodes on Amazon Web Services (AWS). We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances. +**NOTE: At the time of writing, these scripts _do_ launch a bunch of EC2 instances, and they do install RethinkDB plus BigchainDB on each instance, but don't expect to be able to use the cluster for anything useful. There are several issues related to configuration, networking, and external clients that must be sorted out first. That said, you might find it useful to try out the AWS deployment scripts, because setting up to use them, and using them, will be very similar once those issues get sorted out.** + ## Why? You might ask why one would want to deploy a centrally-controlled BigchainDB cluster. Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization? 
From 6d31fc92ffbfc21f9e5588fd37cc42a8f0431c29 Mon Sep 17 00:00:00 2001 From: troymc Date: Fri, 1 Apr 2016 16:06:41 +0200 Subject: [PATCH 39/53] Added a codecov.yml configuration file --- codecov.yml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 codecov.yml diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..490d0805 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,32 @@ +codecov: + branch: develop # the branch to show by default + + # The help text for bot says: + # "the username that will consume any oauth requests + # must have previously logged into Codecov" + # In GitHub - BigchainDB organization settings - Third-party access, + # it says, for Codecov: "approval requested by r-marques" + bot: r-marques + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + target: auto + if_no_uploads: error + + patch: + target: "80%" + if_no_uploads: error + + ignore: # files and folders that will be removed during processing + - "deploy-cluster-aws/*" + - "docs/*" + - "tests/*" + +comment: + layout: "header, diff, changes, sunburst, suggestions" + behavior: default \ No newline at end of file From dca275040714a7c83da791ecc19940ec6e8f268d Mon Sep 17 00:00:00 2001 From: Roderik van der Veer Date: Sun, 3 Apr 2016 10:18:44 +0200 Subject: [PATCH 40/53] Update the cryptography dependency to 1.2.3 This fixes the `conflicting types for 'BIO_new_mem_buf'` error when running with a recent openssl version, e.g. under Alpine Linux. 
See https://github.com/pyca/cryptography/issues/2750 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6156b417..26670814 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,7 @@ setup( 'rethinkdb==2.2.0.post4', 'pysha3==0.3', 'pytz==2015.7', - 'cryptography==1.2.1', + 'cryptography==1.2.3', 'statsd==3.2.1', 'python-rapidjson==0.0.6', 'logstats==0.2.1', From 722cc323931f44985a71c7d882a04e493beea786 Mon Sep 17 00:00:00 2001 From: Roderik van der Veer Date: Sun, 3 Apr 2016 11:43:25 +0200 Subject: [PATCH 41/53] Update the documentation to reflect the move of generate_keys to the crypto module --- docs/source/python-server-api-examples.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/source/python-server-api-examples.md b/docs/source/python-server-api-examples.md index cfb313ef..25fc5b19 100644 --- a/docs/source/python-server-api-examples.md +++ b/docs/source/python-server-api-examples.md @@ -40,8 +40,10 @@ At a high level, a "digital asset" is something which can be represented digital In BigchainDB, only the federation nodes are allowed to create digital assets, by doing a special kind of transaction: a `CREATE` transaction. 
```python +from bigchaindb import crypto + # create a test user -testuser1_priv, testuser1_pub = b.generate_keys() +testuser1_priv, testuser1_pub = crypto.generate_key_pair() # define a digital asset data payload digital_asset_payload = {'msg': 'Hello BigchainDB!'} From ebb8e901da49c2f38dfb9318ef496911da43c978 Mon Sep 17 00:00:00 2001 From: troymc Date: Mon, 4 Apr 2016 11:39:25 +0200 Subject: [PATCH 42/53] Added HOW_TO_HANDLE_PULL_REQUESTS.md --- HOW_TO_HANDLE_PULL_REQUESTS.md | 53 ++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 HOW_TO_HANDLE_PULL_REQUESTS.md diff --git a/HOW_TO_HANDLE_PULL_REQUESTS.md b/HOW_TO_HANDLE_PULL_REQUESTS.md new file mode 100644 index 00000000..72a9e6d5 --- /dev/null +++ b/HOW_TO_HANDLE_PULL_REQUESTS.md @@ -0,0 +1,53 @@ +# How to Handle Pull Requests + +This document is for whoever has the ability to merge pull requests in the Git repositories associated with BigchainDB. + +If the pull request is from an employee of ascribe GmbH, then you can ignore this document. + +If the pull request is from someone who is _not_ an employee of ascribe, then: + +* Have they agreed to the Individual Contributor Agreement in the past? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document. +* Do they belong to a company or organization which agreed to the Entity Contributor Agreement in the past, and will they be contributing on behalf of that company or organization? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document. +* Otherwise, do the following: + +1. Go to the pull request in question and post a comment using this template: + +Hi @nameofuser + +Before we can merge this pull request, which may contain your intellectual property in the form of copyright or patents, our lawyers say we need you or your organization to agree to one of our contributor agreements. 
If you are contributing on behalf of yourself (and not on behalf of your employer or another organization you are part of) then you should: + +1. Go to: https://www.bigchaindb.com/cla/ +2. Read the Individual Contributor Agreement +3. Fill in the form "For Individuals" +4. Check the box to agree +5. Click the SEND button + +If you're contributing as an employee, and/or you want all employees of your employing organization to be covered by our contributor agreement, then someone in your organization with the authority to enter agreements on behalf of all employees must do the following: + +1. Go to: https://www.bigchaindb.com/cla/ +2. Read the Entity Contributor Agreement +3. Fill in the form "For Organizations" +4. Check the box to agree +5. Click the SEND button + +We will email you (or your employer) with further instructions. + +(END OF COMMENT) + +Once they click SEND, we (ascribe) will get an email with the information in the form. (Troy gets those emails for sure, I'm not sure who else.) The next step is to send an email to the email address submitted in the form, saying something like (where the stuff in [square brackets] should be replaced): + +Hi [NAME], + +The next step is for you to copy the following block of text into the comments of Pull Request #[NN] on GitHub: + +BEGIN BLOCK + +This is to confirm that I agreed to and accepted the BigchainDB [Entity/Individual] Contributor Agreement at https://www.bigchaindb.com/cla/ and to represent and warrant that I have authority to do so. + +[Insert long random string here. One good source of those is https://www.grc.com/passwords.htm ] + +END BLOCK + +(END OF EMAIL) + +The next step is to wait for them to copy that comment into the comments of the indicated pull request. Once they do so, it's safe to merge the pull request. 
From 0ecdbd82ea6cca67675b5b5abbddac9ba9e6aa61 Mon Sep 17 00:00:00 2001 From: troymc Date: Mon, 4 Apr 2016 11:44:54 +0200 Subject: [PATCH 43/53] Minor edit in HOW_TO_HANDLE_PULL_REQUESTS.md --- HOW_TO_HANDLE_PULL_REQUESTS.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/HOW_TO_HANDLE_PULL_REQUESTS.md b/HOW_TO_HANDLE_PULL_REQUESTS.md index 72a9e6d5..709deeb4 100644 --- a/HOW_TO_HANDLE_PULL_REQUESTS.md +++ b/HOW_TO_HANDLE_PULL_REQUESTS.md @@ -8,9 +8,7 @@ If the pull request is from someone who is _not_ an employee of ascribe, then: * Have they agreed to the Individual Contributor Agreement in the past? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document. * Do they belong to a company or organization which agreed to the Entity Contributor Agreement in the past, and will they be contributing on behalf of that company or organization? (Troy, Greg, and others have a list.) If yes, then you can merge the PR and ignore the rest of this document. -* Otherwise, do the following: - -1. Go to the pull request in question and post a comment using this template: +* Otherwise, go to the pull request in question and post a comment using this template: Hi @nameofuser From acc22fd0b2bf6baedd72f5b76fb3d642a9810d35 Mon Sep 17 00:00:00 2001 From: troymc Date: Mon, 4 Apr 2016 14:46:11 +0200 Subject: [PATCH 44/53] one line: pip install fabric fabtools requests boto3 --- docs/source/deploy-on-aws.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 8a4646b6..8d5bf727 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -16,10 +16,7 @@ The instructions that follow have been tested on Ubuntu 14.04, but may also work Our Python scripts for deploying to AWS use Python 2, so maybe create a Python 2 virtual environment and activate it. 
Then install the following Python packages (in that virtual environment): ```text -pip install fabric -pip install fabtools -pip install requests -pip install boto3 +pip install fabric fabtools requests boto3 ``` What did you just install? From 8bcecc38ffc569b439c6430edbb393a917b4157b Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 11:07:04 +0200 Subject: [PATCH 45/53] Remove fabric check in startup.sh --- deploy-cluster-aws/startup.sh | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/deploy-cluster-aws/startup.sh b/deploy-cluster-aws/startup.sh index 5c8de655..84d420cb 100755 --- a/deploy-cluster-aws/startup.sh +++ b/deploy-cluster-aws/startup.sh @@ -23,22 +23,8 @@ fi TAG=$1 NODES=$2 -FAB=`which fab` -# It seems BIGCHAINDIR was never used, but I wasn't sure -# so I just commented-out the following lines. -Troy -#DEPLOYDIR=`pwd` -#BIGCHAINDIR=`dirname $DEPLOYDIR` -#export BIGCHAINDIR - -# Check if python-fabric is installed -if [ ! -f "$FAB" ] - then - echo "python-fabric is not installed" - exit 1 -fi - -# Check for AWS private key file pem-file and changing access rights +# Check for AWS private key file (.pem file) if [ ! 
-f "pem/bigchaindb.pem" ] then echo "File pem/bigchaindb.pem (AWS private key) is missing" From b100e2820ca5ffebf4735cafd3ecb44987808de5 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 11:09:57 +0200 Subject: [PATCH 46/53] Moved AWS credentials & configs to standard ~/.aws files --- deploy-cluster-aws/awscommon.py | 6 ++--- deploy-cluster-aws/launch_ec2_nodes.py | 8 +------ docs/source/deploy-on-aws.md | 33 ++++++++++++++++---------- 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/deploy-cluster-aws/awscommon.py b/deploy-cluster-aws/awscommon.py index 5f180ccd..1e413d95 100644 --- a/deploy-cluster-aws/awscommon.py +++ b/deploy-cluster-aws/awscommon.py @@ -3,15 +3,13 @@ """ from __future__ import unicode_literals -import os # Global constants -AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID'] -AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY'] -AWS_REGION = os.environ['AWS_REGION'] +# None yet +# Functions def get_naeips(client0): """Get a list of (allocated) non-associated elastic IP addresses (NAEIPs) on EC2. 
diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 95d78750..3c37824d 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -17,9 +17,6 @@ import argparse import botocore import boto3 from awscommon import ( - AWS_ACCESS_KEY_ID, - AWS_SECRET_ACCESS_KEY, - AWS_REGION, get_naeips, ) @@ -38,10 +35,7 @@ num_nodes = int(args.nodes) # Get an AWS EC2 "resource" # See http://boto3.readthedocs.org/en/latest/guide/resources.html -ec2 = boto3.resource(service_name='ec2', - region_name=AWS_REGION, - aws_access_key_id=AWS_ACCESS_KEY_ID, - aws_secret_access_key=AWS_SECRET_ACCESS_KEY) +ec2 = boto3.resource(service_name='ec2') # Create a client from the EC2 resource # See http://boto3.readthedocs.org/en/latest/guide/clients.html diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 8d5bf727..55143703 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -14,9 +14,11 @@ That's true, but there are some reasons why one might want a centrally-controlle The instructions that follow have been tested on Ubuntu 14.04, but may also work on similar distros or operating systems. -Our Python scripts for deploying to AWS use Python 2, so maybe create a Python 2 virtual environment and activate it. Then install the following Python packages (in that virtual environment): +**Note: Our Python scripts for deploying to AWS use Python 2 because Fabric doesn't work with Python 3.** + +Maybe create a Python 2 virtual environment and activate it. Then install the following Python packages (in that virtual environment): ```text -pip install fabric fabtools requests boto3 +pip install fabric fabtools requests boto3 awscli ``` What did you just install? @@ -25,8 +27,7 @@ What did you just install? 
* [fabtools](https://github.com/ronnix/fabtools) are "tools for writing awesome Fabric files" * [requests](http://docs.python-requests.org/en/master/) is a Python package/library for sending HTTP requests * "[Boto](https://boto3.readthedocs.org/en/latest/) is the Amazon Web Services (AWS) SDK for Python, which allows Python developers to write software that makes use of Amazon services like S3 and EC2." (`boto3` is the name of the latest Boto package.) - -Note: You _don't_ need to install `awscli` (AWS Command-Line Interface tools) but you can if you like. +* [The aws-cli package](https://pypi.python.org/pypi/awscli), which is an AWS Command Line Interface (CLI). ## AWS Setup @@ -36,21 +37,27 @@ Before you can deploy a BigchainDB cluster on AWS, you must have an AWS account. The next thing you'll need is an AWS access key. If you don't have one, you can create one using the [instructions in the AWS documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html). You should get an access key ID (e.g. AKIAIOSFODNN7EXAMPLE) and a secret access key (e.g. wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). -Our AWS deployment scripts read the AWS access key information from environment variables. One way to set the appropriate environment variables is to edit your `~/.bashrc` file (or similar) by adding the lines: +You should also pick a default AWS region name (e.g. `eu-central-1`). That's where your cluster will run. The AWS documentation has [a list of them](http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). + +Once you've got your AWS access key, and you've picked a default AWS region name, go to a terminal session and enter: ```text -export AWS_ACCESS_KEY_ID=[[insert AWS access key here, with no brackets]] -export AWS_SECRET_ACCESS_KEY=[[insert AWS secret access key here, with no brackets]] -export AWS_REGION=eu-central-1 +aws configure ``` -You can change the `AWS_REGION` to a different one if you like. 
(It's where the cluster will be deployed.) The AWS documentation has [a list of them](http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). - -You can force your terminal to re-read `~/.bashrc` by using +and answer the four questions. For example: ```text -source ~/.bashrc +AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE +AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +Default region name [None]: eu-central-1 +Default output format [None]: [Press Enter] ``` -or by opening a new terminal session. +This writes two files: + +* `~/.aws/credentials` +* `~/.aws/config` + +AWS tools and packages look for those files. ### Get Enough Amazon Elastic IP Addresses From 3f491e5bd14b2af329975d4e75604276a0a8f3b3 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 15:21:24 +0200 Subject: [PATCH 47/53] Ensure Python 2.5-2.7 when using AWS deploy scripts --- deploy-cluster-aws/launch_ec2_nodes.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 3c37824d..2c717f6f 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -12,6 +12,7 @@ """ from __future__ import unicode_literals +import sys import time import argparse import botocore @@ -20,6 +21,17 @@ from awscommon import ( get_naeips, ) +# First, ensure they're using Python 2.5-2.7 +pyver = sys.version_info +major = pyver[0] +minor = pyver[1] +print('You are in an environment where "python" is Python {}.{}'. 
+ format(major, minor)) +if not ((major == 2) and (minor >= 5) and (minor <= 7)): + print('but Fabric only works with Python 2.5-2.7') + sys.exit(1) + +# Parse the command-line arguments parser = argparse.ArgumentParser() parser.add_argument("--tag", help="tag to add to all launched instances on AWS", @@ -48,7 +60,7 @@ print('Checking if you have enough allocated-but-unassociated ' + non_associated_eips = get_naeips(client) -print('You have {} allocated elactic IPs which are ' +print('You have {} allocated elastic IPs which are ' 'not already associated with instances'. format(len(non_associated_eips))) From efd6514a04b52ea8afdd8429e622f3e38fad56a6 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 15:23:26 +0200 Subject: [PATCH 48/53] Fixed apt-get update problem in AWS deployment fabfile --- deploy-cluster-aws/fabfile.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index 705c1b37..c9f1e176 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -41,10 +41,14 @@ env.key_filename = 'pem/bigchaindb.pem' ###################################################################### -# base softwarestack rollout +# base software rollout @task @parallel def install_base_software(): + # new from Troy April 5, 2016. Why? 
See http://tinyurl.com/lccfrsj + sudo('rm -rf /var/lib/apt/lists/*') + sudo('apt-get -y clean') + # from before: sudo('apt-get -y update') sudo('dpkg --configure -a') sudo('apt-get -y -f install') From 838ed80770e048100613843abac5d559c1a12be2 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 16:50:23 +0200 Subject: [PATCH 49/53] Docs: Remove ugly red text from headings --- docs/source/installing-server.md | 2 +- docs/source/running-unit-tests.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/installing-server.md b/docs/source/installing-server.md index b16b0f8d..c168f62d 100644 --- a/docs/source/installing-server.md +++ b/docs/source/installing-server.md @@ -44,7 +44,7 @@ $ sudo dnf install libffi-devel gcc-c++ redhat-rpm-config python3-devel openssl- With OS-level dependencies installed, you can install BigchainDB Server with `pip` or from source. -### How to Install BigchainDB with `pip` +### How to Install BigchainDB with pip BigchainDB (i.e. both the Server and the officially-supported drivers) is distributed as a Python package on PyPI so you can install it using `pip`. First, make sure you have a version of `pip` installed for Python 3.4+: ```text diff --git a/docs/source/running-unit-tests.md b/docs/source/running-unit-tests.md index c04ccb22..1a4f0e34 100644 --- a/docs/source/running-unit-tests.md +++ b/docs/source/running-unit-tests.md @@ -26,7 +26,7 @@ You can also run all unit tests via `setup.py`, using: $ python setup.py test ``` -### Using `docker-compose` to Run the Tests +### Using docker-compose to Run the Tests You can also use `docker-compose` to run the unit tests. (You don't have to start RethinkDB first: `docker-compose` does that on its own, when it reads the `docker-compose.yml` file.) 
From 569b6ef7615d2a7ee7b3badcee4ac47488561b3b Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 16:51:24 +0200 Subject: [PATCH 50/53] Comment-out 2 cmds before apt-get update in fabfile --- deploy-cluster-aws/fabfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py index c9f1e176..22af0f04 100644 --- a/deploy-cluster-aws/fabfile.py +++ b/deploy-cluster-aws/fabfile.py @@ -46,8 +46,8 @@ env.key_filename = 'pem/bigchaindb.pem' @parallel def install_base_software(): # new from Troy April 5, 2016. Why? See http://tinyurl.com/lccfrsj - sudo('rm -rf /var/lib/apt/lists/*') - sudo('apt-get -y clean') + # sudo('rm -rf /var/lib/apt/lists/*') + # sudo('apt-get -y clean') # from before: sudo('apt-get -y update') sudo('dpkg --configure -a') From 2debba73c4b7c2f4dfd8ea04614f117e99117ff9 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 16:59:04 +0200 Subject: [PATCH 51/53] Add another 'Known Deployment Issue' to AWS deployment docs --- docs/source/deploy-on-aws.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 55143703..4b52cabb 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -113,7 +113,7 @@ There are fees associated with running instances on EC2, so if you're not using The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them from the AWS EC2 Console. -## Known Issues +## Known Deployment Issues ### NetworkError @@ -131,6 +131,10 @@ mv ~/.ssh/known_hosts ~/.ssh/old_known_hosts Then terminate your instances and try deploying again with a different tag. 
+### Failure of sudo apt-get update + +The first thing that's done on all the instances, once they're running, is basically [`sudo apt-get update`](http://askubuntu.com/questions/222348/what-does-sudo-apt-get-update-do). Sometimes that fails. If so, just terminate your instances and try deploying again with a different tag. (These problems seem to be time-bounded, so maybe wait a couple of hours before retrying.) + ### Failure when Installing Base Software If you get an error with installing the base software on the instances, then just terminate your instances and try deploying again with a different tag. From 1c399486e34d1359ae5376a7d7b698eee3a36918 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 17:00:01 +0200 Subject: [PATCH 52/53] Ensure newly-launched AWS instances don't have same tag as existing instances --- deploy-cluster-aws/launch_ec2_nodes.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py index 2c717f6f..c531821b 100644 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ b/deploy-cluster-aws/launch_ec2_nodes.py @@ -53,6 +53,23 @@ ec2 = boto3.resource(service_name='ec2') # See http://boto3.readthedocs.org/en/latest/guide/clients.html client = ec2.meta.client +# Ensure they don't already have some instances with the specified tag +# Get a list of all instances with the specified tag. +# (Technically, instances_with_tag is an ec2.instancesCollection.) +filters = [{'Name': 'tag:Name', 'Values': [tag]}] +instances_with_tag = ec2.instances.filter(Filters=filters) +# len() doesn't work on instances_with_tag. This does: +num_ins = 0 +for instance in instances_with_tag: + num_ins += 1 +if num_ins != 0: + print('You already have {} instances with the tag {} on EC2.'. 
+ format(num_ins, tag)) + print('You should either pick a different tag or ' + 'terminate all those instances and ' + 'wait until they vanish from your EC2 Console.') + sys.exit(1) + # Before launching any instances, make sure they have sufficient # allocated-but-unassociated EC2 elastic IP addresses print('Checking if you have enough allocated-but-unassociated ' + From d6f471af5c92e3dc056dede9ff1ddc0d5e9bbae0 Mon Sep 17 00:00:00 2001 From: troymc Date: Tue, 5 Apr 2016 17:35:50 +0200 Subject: [PATCH 53/53] Docs now say more about how to check on AWS instances --- docs/source/deploy-on-aws.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/docs/source/deploy-on-aws.md b/docs/source/deploy-on-aws.md index 4b52cabb..2e4a18e2 100644 --- a/docs/source/deploy-on-aws.md +++ b/docs/source/deploy-on-aws.md @@ -107,7 +107,20 @@ cd deploy-cluster-aws 10. generates the genesis block, 11. starts BigchainDB on all instances. -It should take a few minutes for the deployment to finish. Once it's finished, you can login to your AWS EC2 Console (on the web) to see the instances just launched. +It should take a few minutes for the deployment to finish. If you run into problems, see the section on Known Deployment Issues below. + +The EC2 Console has a section where you can see all the instances you have running on EC2. You can `ssh` into a running instance using a command like: +```text +ssh -i pem/bigchaindb.pem ubuntu@ec2-52-29-197-211.eu-central-1.compute.amazonaws.com +``` + +except you'd replace the `ec2-52-29-197-211.eu-central-1.compute.amazonaws.com` with the public DNS name of the instance you want to `ssh` into. You can get that from the EC2 Console: just click on an instance and look in its details pane at the bottom of the screen. 
Some commands you might try: +```text +ip addr show +sudo service rethinkdb status +bigchaindb --help +bigchaindb show-config +``` There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that from the AWS EC2 Console.