From b237a021c61b4f48314b3bcb254615d274891b5a Mon Sep 17 00:00:00 2001 From: Troy McConaghy Date: Thu, 30 Nov 2017 17:50:22 +0100 Subject: [PATCH] Removed old AWS testing cluster stuff including docs --- .gitattributes | 9 - .gitignore | 11 - codecov.yml | 2 - deploy-cluster-aws/awscommon.py | 34 -- deploy-cluster-aws/awsdeploy.sh | 193 --------- deploy-cluster-aws/clusterize_confiles.py | 108 ------ .../conf/rethinkdb.conf.template | 105 ----- deploy-cluster-aws/create_rethinkdb_conf.py | 69 ---- deploy-cluster-aws/example_deploy_conf.py | 82 ---- deploy-cluster-aws/fabfile.py | 367 ------------------ deploy-cluster-aws/keypairs.py | 264 ------------- deploy-cluster-aws/launch_ec2_nodes.py | 337 ---------------- deploy-cluster-aws/make_confiles.sh | 38 -- deploy-cluster-aws/release_eips.py | 43 -- deploy-cluster-aws/write_keypairs_file.py | 49 --- .../source/appendices/aws-testing-cluster.md | 207 ---------- docs/server/source/appendices/index.rst | 1 - docs/server/source/clusters.md | 5 +- docs/server/source/introduction.md | 1 - 19 files changed, 2 insertions(+), 1923 deletions(-) delete mode 100644 .gitattributes delete mode 100644 deploy-cluster-aws/awscommon.py delete mode 100755 deploy-cluster-aws/awsdeploy.sh delete mode 100644 deploy-cluster-aws/clusterize_confiles.py delete mode 100644 deploy-cluster-aws/conf/rethinkdb.conf.template delete mode 100644 deploy-cluster-aws/create_rethinkdb_conf.py delete mode 100644 deploy-cluster-aws/example_deploy_conf.py delete mode 100644 deploy-cluster-aws/fabfile.py delete mode 100644 deploy-cluster-aws/keypairs.py delete mode 100644 deploy-cluster-aws/launch_ec2_nodes.py delete mode 100755 deploy-cluster-aws/make_confiles.sh delete mode 100644 deploy-cluster-aws/release_eips.py delete mode 100644 deploy-cluster-aws/write_keypairs_file.py delete mode 100644 docs/server/source/appendices/aws-testing-cluster.md diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index d278a72d..00000000 --- a/.gitattributes +++ /dev/null @@ -1,9 +0,0 @@ -deploy-cluster-aws export-ignore -docs export-ignore -ntools export-ignore -tests export-ignore -.gitattributes export-ignore -.gitignore export-ignore -.travis.yml export-ignore -*.md export-ignore -codecov.yml export-ignore diff --git a/.gitignore b/.gitignore index 49e2f620..5397ca64 100644 --- a/.gitignore +++ b/.gitignore @@ -65,17 +65,6 @@ target/ # pyenv .python-version -# Some files created when deploying a cluster on AWS -deploy-cluster-aws/conf/rethinkdb.conf -deploy-cluster-aws/confiles/ -deploy-cluster-aws/client_confile -deploy-cluster-aws/hostlist.py -deploy-cluster-aws/ssh_key.py - -# Ansible-specific files -ntools/one-m/ansible/hosts -ntools/one-m/ansible/ansible.cfg - # Just in time documentation docs/server/source/http-samples diff --git a/codecov.yml b/codecov.yml index 0ab4582d..3ba28a82 100644 --- a/codecov.yml +++ b/codecov.yml @@ -25,11 +25,9 @@ coverage: if_no_uploads: error ignore: # files and folders that will be removed during processing - - "deploy-cluster-aws/*" - "docs/*" - "tests/*" - "bigchaindb/version.py" - - "ntools/*" - "k8s/*" comment: diff --git a/deploy-cluster-aws/awscommon.py b/deploy-cluster-aws/awscommon.py deleted file mode 100644 index 1e413d95..00000000 --- a/deploy-cluster-aws/awscommon.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -"""Shared AWS-related global constants and functions. 
-""" - -from __future__ import unicode_literals - - -# Global constants -# None yet - - -# Functions -def get_naeips(client0): - """Get a list of (allocated) non-associated elastic IP addresses - (NAEIPs) on EC2. - - Args: - client0: A client created from an EC2 resource. - e.g. client0 = ec2.meta.client - See http://boto3.readthedocs.org/en/latest/guide/clients.html - - Returns: - A list of NAEIPs in the EC2 account associated with the client. - To interpret the contents, see http://tinyurl.com/hrnuy74 - """ - # response is a dict with 2 keys: Addresses and ResponseMetadata - # See http://tinyurl.com/hrnuy74 - response = client0.describe_addresses() - allocated_eips = response['Addresses'] - non_associated_eips = [] - for eip in allocated_eips: - if 'InstanceId' not in eip: - non_associated_eips.append(eip) - return non_associated_eips diff --git a/deploy-cluster-aws/awsdeploy.sh b/deploy-cluster-aws/awsdeploy.sh deleted file mode 100755 index b733ef2d..00000000 --- a/deploy-cluster-aws/awsdeploy.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/bin/bash - -set -euo pipefail -# -e Abort at the first failed line (i.e. if exit status is not 0) -# -u Abort when undefined variable is used -# -o pipefail (Bash-only) Piped commands return the status -# of the last failed command, rather than the status of the last command - -# Check for the first command-line argument -# (the name of the AWS deployment config file) -if [ -z "$1" ]; then - # no first argument was provided - echo "awsdeploy: missing file operand" - echo "Usage: awsdeploy DEPLOY_CONF_FILE" - echo "Deploy BigchainDB on AWS using the specified AWS deployment configuration file" - exit 1 -fi - -DEPLOY_CONF_FILE=$1 - -# Check to make sure DEPLOY_CONF_FILE exists -if [ ! -f "$DEPLOY_CONF_FILE" ]; then - echo "AWS deployment configuration file not found: "$DEPLOY_CONF_FILE - exit 1 -fi - -# Read DEPLOY_CONF_FILE -# to set environment variables related to AWS deployment -echo "Reading "$DEPLOY_CONF_FILE -source $DEPLOY_CONF_FILE - -# Check if SSH_KEY_NAME got set -if [ "$SSH_KEY_NAME" == "not-set-yet" ] || \ - [ "$SSH_KEY_NAME" == "" ] || \ - [ -z ${SSH_KEY_NAME+x} ]; then - echo "SSH_KEY_NAME was not set in that file" - exit 1 -fi - -echo "NUM_NODES = "$NUM_NODES -echo "BRANCH = "$BRANCH -echo "SSH_KEY_NAME" = $SSH_KEY_NAME -echo "USE_KEYPAIRS_FILE = "$USE_KEYPAIRS_FILE -echo "IMAGE_ID = "$IMAGE_ID -echo "INSTANCE_TYPE = "$INSTANCE_TYPE -echo "SECURITY_GROUP = "$SECURITY_GROUP -echo "USING_EBS = "$USING_EBS -# Treat booleans as strings which must be either "True" or "False" -if [ "$USING_EBS" == "True" ]; then - echo "EBS_VOLUME_SIZE = "$EBS_VOLUME_SIZE - echo "EBS_OPTIMIZED = "$EBS_OPTIMIZED -fi -echo "ENABLE_WEB_ADMIN = "$ENABLE_WEB_ADMIN -if [ "$ENABLE_WEB_ADMIN" == "True" ]; then - echo "BIND_HTTP_TO_LOCALHOST = "$BIND_HTTP_TO_LOCALHOST -fi - -# Check for the SSH private key file -if [ ! -f "$HOME/.ssh/$SSH_KEY_NAME" ]; then - echo "The SSH private key file "$HOME"/.ssh/"$SSH_KEY_NAME" is missing" - exit 1 -fi - -# Check for the confiles directory -if [ ! 
-d "confiles" ]; then - echo "Directory confiles is needed but does not exist" - echo "See make_confiles.sh to find out how to make it" - exit 1 -fi - -# Check if NUM_NODES got set -if [ -z "$NUM_NODES" ]; then - echo "NUM_NODES is not set in the AWS deployment configuration file "$DEPLOY_CONF_FILE - exit 1 -fi - -# Check if the number of files in confiles directory == NUM_NODES -CONFILES_COUNT=`ls confiles | wc -l` -if [[ $CONFILES_COUNT != $NUM_NODES ]]; then - echo "ERROR: CONFILES_COUNT = "$CONFILES_COUNT - echo "but NUM_NODES = "$NUM_NODES - echo "so there should be "$NUM_NODES" files in the confiles directory" - exit 1 -fi - -# Auto-generate the tag to apply to all nodes in the cluster -TAG="BDB-Server-"`date +%m-%d@%H:%M` -echo "TAG = "$TAG - -# Change the file permissions on the SSH private key file -# so that the owner can read it, but that's all -chmod 0400 $HOME/.ssh/$SSH_KEY_NAME - -# The following Python script does these things: -# 0. allocates more elastic IP addresses if necessary, -# 1. launches the specified number of nodes (instances) on Amazon EC2, -# 2. tags them with the specified tag, -# 3. waits until those instances exist and are running, -# 4. for each instance, it associates an elastic IP address -# with that instance, -# 5. writes the shellscript add2known_hosts.sh -# 6. (over)writes a file named hostlist.py -# containing a list of all public DNS names. -# 7. (over)writes a file named ssh_key.py -# containing the location of the private SSH key file. -python launch_ec2_nodes.py --deploy-conf-file $DEPLOY_CONF_FILE --tag $TAG - -# Make add2known_hosts.sh executable then execute it. -# This adds remote keys to ~/.ssh/known_hosts -chmod +x add2known_hosts.sh -./add2known_hosts.sh - -# Test an SSH connection to one of the hosts -# and prompt the user for their SSH password if necessary -fab set_host:0 test_ssh - -# Rollout base packages (dependencies) needed before -# storage backend (RethinkDB) and BigchainDB can be rolled out -fab install_base_software -fab get_pip3 -fab upgrade_setuptools - -# (Re)create the RethinkDB configuration file conf/rethinkdb.conf -if [ "$ENABLE_WEB_ADMIN" == "True" ]; then - if [ "$BIND_HTTP_TO_LOCALHOST" == "True" ]; then - python create_rethinkdb_conf.py --enable-web-admin --bind-http-to-localhost - else - python create_rethinkdb_conf.py --enable-web-admin - fi -else - python create_rethinkdb_conf.py -fi - -# Rollout RethinkDB and start it -fab prep_rethinkdb_storage:$USING_EBS -fab install_rethinkdb -fab configure_rethinkdb -fab delete_rethinkdb_data -fab start_rethinkdb - -# Rollout BigchainDB (but don't start it yet) -if [ "$BRANCH" == "pypi" ]; then - fab install_bigchaindb_from_pypi -else - cd .. - rm -f bigchaindb-archive.tar.gz - git archive $BRANCH --format=tar --output=bigchaindb-archive.tar - gzip bigchaindb-archive.tar - mv bigchaindb-archive.tar.gz deploy-cluster-aws - cd deploy-cluster-aws - fab install_bigchaindb_from_git_archive - rm bigchaindb-archive.tar.gz -fi - -# Configure BigchainDB on all nodes - -# The idea is to send a bunch of locally-created configuration -# files out to each of the instances / nodes. - -# Assume a set of $NUM_NODES BigchaindB config files -# already exists in the confiles directory. 
-# One can create a set using a command like
-# ./make_confiles.sh confiles $NUM_NODES
-# (We can't do that here now because this virtual environment
-# is a Python 2 environment that may not even have
-# bigchaindb installed, so bigchaindb configure can't be called)
-
-# Transform the config files in the confiles directory
-# to have proper keyrings etc.
-if [ "$USE_KEYPAIRS_FILE" == "True" ]; then
-    python clusterize_confiles.py -k confiles $NUM_NODES
-else
-    python clusterize_confiles.py confiles $NUM_NODES
-fi
-
-# Send one of the config files to each instance
-for (( HOST=0 ; HOST<$NUM_NODES ; HOST++ )); do
-    CONFILE="bcdb_conf"$HOST
-    echo "Sending "$CONFILE
-    fab set_host:$HOST send_confile:$CONFILE
-done
-
-# Initialize BigchainDB (i.e. Create the RethinkDB database,
-# the tables, the indexes, and the genesis block). Note that
-# this will only be sent to one of the nodes, see the
-# definition of init_bigchaindb() in fabfile.py to see why.
-fab init_bigchaindb
-fab set_shards:$NUM_NODES
-echo "To set the replication factor to 3, do: fab set_replicas:3"
-echo "To start BigchainDB on all the nodes, do: fab start_bigchaindb"
-
-# cleanup
-rm add2known_hosts.sh
diff --git a/deploy-cluster-aws/clusterize_confiles.py b/deploy-cluster-aws/clusterize_confiles.py
deleted file mode 100644
index fbbf0dd6..00000000
--- a/deploy-cluster-aws/clusterize_confiles.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Given a directory full of default BigchainDB config files,
-transform them into config files for a cluster with proper
-keyrings, API endpoint values, etc. This script is meant to
-be interpreted as a Python 2 script.
-
-Note 1: This script assumes that there is a file named hostlist.py
-containing public_dns_names = a list of the public DNS names of
-all the hosts in the cluster.
-
-Note 2: If the optional -k argument is included, then a keypairs.py
-file must exist and must have enough keypairs in it to assign one
-to each of the config files in the directory of config files.
-You can create a keypairs.py file using write_keypairs_file.py
-
-Usage:
-    python clusterize_confiles.py [-h] [-k] dir number_of_files
-"""
-
-from __future__ import unicode_literals
-
-import os
-import json
-import argparse
-
-from hostlist import public_dns_names
-
-if os.path.isfile('keypairs.py'):
-    from keypairs import keypairs_list
-
-
-# Parse the command-line arguments
-desc = 'Transform a directory of default BigchainDB config files '
-desc += 'into config files for a cluster'
-parser = argparse.ArgumentParser(description=desc)
-parser.add_argument('dir',
-                    help='Directory containing the config files')
-parser.add_argument('number_of_files',
-                    help='Number of config files expected in dir',
-                    type=int)
-parser.add_argument('-k', '--use-keypairs',
-                    action='store_true',
-                    default=False,
-                    help='Use public and private keys from keypairs.py')
-args = parser.parse_args()
-
-conf_dir = args.dir
-num_files_expected = int(args.number_of_files)
-use_keypairs = args.use_keypairs
-
-# Check if the number of files in conf_dir is what was expected
-conf_files = sorted(os.listdir(conf_dir))
-num_files = len(conf_files)
-if num_files != num_files_expected:
-    raise ValueError('There are {} files in {} but {} were expected'.
- format(num_files, conf_dir, num_files_expected)) - -# If the -k option was included, check to make sure there are enough keypairs -# in keypairs_list -num_keypairs = len(keypairs_list) -if use_keypairs: - if num_keypairs < num_files: - raise ValueError('There are {} config files in {} but ' - 'there are only {} keypairs in keypairs.py'. - format(num_files, conf_dir, num_keypairs)) - -# Make a list containing all the public keys -if use_keypairs: - print('Using keypairs from keypairs.py') - pubkeys = [keypair[1] for keypair in keypairs_list[:num_files]] -else: - # read the pubkeys from the config files in conf_dir - pubkeys = [] - for filename in conf_files: - file_path = os.path.join(conf_dir, filename) - with open(file_path, 'r') as f: - conf_dict = json.load(f) - pubkey = conf_dict['keypair']['public'] - pubkeys.append(pubkey) - -# Rewrite each config file, one at a time -for i, filename in enumerate(conf_files): - file_path = os.path.join(conf_dir, filename) - with open(file_path, 'r') as f: - conf_dict = json.load(f) - # If the -k option was included - # then replace the private and public keys - # with those from keypairs_list - if use_keypairs: - keypair = keypairs_list[i] - conf_dict['keypair']['private'] = keypair[0] - conf_dict['keypair']['public'] = keypair[1] - # The keyring is the list of *all* public keys - # minus the config file's own public key - keyring = list(pubkeys) - keyring.remove(conf_dict['keypair']['public']) - conf_dict['keyring'] = keyring - # Allow incoming server traffic from any IP address - # to port 9984 - conf_dict['server']['bind'] = '0.0.0.0:9984' - - # Delete the config file - os.remove(file_path) - - # Write new config file with the same filename - print('Rewriting {}'.format(file_path)) - with open(file_path, 'w') as f2: - json.dump(conf_dict, f2) diff --git a/deploy-cluster-aws/conf/rethinkdb.conf.template b/deploy-cluster-aws/conf/rethinkdb.conf.template deleted file mode 100644 index c1541979..00000000 --- a/deploy-cluster-aws/conf/rethinkdb.conf.template +++ /dev/null @@ -1,105 +0,0 @@ -# -# RethinkDB instance configuration sample -# -# - Give this file the extension .conf and put it in /etc/rethinkdb/instances.d in order to enable it. -# - See http://www.rethinkdb.com/docs/guides/startup/ for the complete documentation -# - Uncomment an option to change its value. -# - -############################### -## RethinkDB configuration -############################### - -### Process options - -## User and group used to run rethinkdb -## Command line default: do not change user or group -## Init script default: rethinkdb user and group -# runuser=rethinkdb -# rungroup=rethinkdb - -## Stash the pid in this file when the process is running -## Note for systemd users: Systemd uses its own internal mechanism. Do not set this parameter. 
-## Command line default: none -## Init script default: /var/run/rethinkdb//pid_file (where is the name of this config file without the extension) -# pid-file=/var/run/rethinkdb/rethinkdb.pid - -### File path options - -## Directory to store data and metadata -## Command line default: ./rethinkdb_data -## Init script default: /var/lib/rethinkdb// (where is the name of this file without the extension) -directory=/data - -## Log file options -## Default: /log_file -#log-file=/var/log/rethinkdb - -### Network options - -## Address of local interfaces to listen on when accepting connections -## May be 'all' or an IP address, loopback addresses are enabled by default -## Default: all local addresses -# bind=127.0.0.1 -bind=all - -## Address that other rethinkdb instances will use to connect to this server. -## It can be specified multiple times -# canonical-address= - -## The port for rethinkdb protocol for client drivers -## Default: 28015 + port-offset -# driver-port=28015 - -## The port for receiving connections from other nodes -## Default: 29015 + port-offset -# cluster-port=29015 - -## The host:port of a node that rethinkdb will connect to -## This option can be specified multiple times. -## Default: none -# join=example.com:29015 - -## All ports used locally will have this value added -## Default: 0 -# port-offset=0 - -## r.http(...) queries will use the given server as a web proxy -## Default: no proxy -# reql-http-proxy=socks5://example.com:1080 - -### Web options - -## Port for the http admin console -## Default: 8080 + port-offset -# http-port=8080 - -## Disable web administration console -# no-http-admin - -### CPU options - -## The number of cores to use -## Default: total number of cores of the CPU -# cores=2 - -### Memory options - -## Size of the cache in MB -## Default: Half of the available RAM on startup -# cache-size=1024 - -### Disk - -## How many simultaneous I/O operations can happen at the same time -# io-threads=64 -#io-threads=128 - -## Enable direct I/O -direct-io - -### Meta - -## The name for this server (as will appear in the metadata). -## If not specified, it will be randomly chosen from a short list of names. -# server-name=server1 diff --git a/deploy-cluster-aws/create_rethinkdb_conf.py b/deploy-cluster-aws/create_rethinkdb_conf.py deleted file mode 100644 index a3bec896..00000000 --- a/deploy-cluster-aws/create_rethinkdb_conf.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -"""(Re)create the RethinkDB configuration file conf/rethinkdb.conf. -Start with conf/rethinkdb.conf.template -then append additional configuration settings (lines). 
-""" - -from __future__ import unicode_literals -import os -import os.path -import shutil -import argparse -from hostlist import public_dns_names - -# Parse the command-line arguments -parser = argparse.ArgumentParser() -# The next line isn't strictly necessary, but it clarifies the default case: -parser.set_defaults(enable_web_admin=False) -parser.add_argument('--enable-web-admin', - action='store_true', - help='should the RethinkDB web interface be enabled?') -# The next line isn't strictly necessary, but it clarifies the default case: -parser.set_defaults(bind_http_to_localhost=False) -parser.add_argument('--bind-http-to-localhost', - action='store_true', - help='should RethinkDB web interface be bound to localhost?') -args = parser.parse_args() -enable_web_admin = args.enable_web_admin -bind_http_to_localhost = args.bind_http_to_localhost - -# cwd = current working directory -old_cwd = os.getcwd() -os.chdir('conf') -if os.path.isfile('rethinkdb.conf'): - os.remove('rethinkdb.conf') - -# Create the initial rethinkdb.conf using rethinkdb.conf.template -shutil.copy2('rethinkdb.conf.template', 'rethinkdb.conf') - -# Append additional lines to rethinkdb.conf -with open('rethinkdb.conf', 'a') as f: - f.write('## The host:port of a node that RethinkDB will connect to\n') - for public_dns_name in public_dns_names: - f.write('join=' + public_dns_name + ':29015\n') - if not enable_web_admin: - f.write('## Disable the RethinkDB web administration console\n') - f.write('no-http-admin\n') - else: - # enable the web admin, i.e. don't disable it (the default), and: - if bind_http_to_localhost: - f.write('## Bind the web interface port to localhost\n') - # 127.0.0.1 is the usual IP address for localhost - f.write('bind-http=127.0.0.1\n') - -os.chdir(old_cwd) - -# Note: The original code by Andreas wrote a file with lines of the form -# join=public_dns_name_0:29015 -# join=public_dns_name_1:29015 -# but it stopped about halfway through the list of public_dns_names -# (publist). In principle, it's only strictly necessary to -# have one join= line. -# Maybe Andreas thought that more is better, but all is too much? -# Below is Andreas' original code. -Troy -# lfile = open('add2dbconf', 'w') -# before = 'join=' -# after = ':29015' -# lfile.write('## The host:port of a node that rethinkdb will connect to\n') -# for entry in range(0,int(len(publist)/2)): -# lfile.write(before + publist[entry] + after + '\n') diff --git a/deploy-cluster-aws/example_deploy_conf.py b/deploy-cluster-aws/example_deploy_conf.py deleted file mode 100644 index 6aab8f30..00000000 --- a/deploy-cluster-aws/example_deploy_conf.py +++ /dev/null @@ -1,82 +0,0 @@ -# AWS deployment config file - -# To use in a Bash shell script: -# source example_deploy_conf.py -# # $EXAMPLEVAR now has a value - -# To use in a Python script: -# from example_deploy_conf import * -# or -# import importlib -# cf = importlib.import_module('example_deploy_conf') -# # cf.EXAMPLEVAR now has a value - -# DON'T PUT SPACES AROUND THE = -# because that would confuse Bash. -# Example values: "string in double quotes", 32, True, False - -# NUM_NODES is the number of nodes to deploy -NUM_NODES=3 - -# BRANCH is either "pypi" or the name of a local Git branch -# (e.g. "master" or "feat/3627/optional-delimiter-in-txfile") -# It's where to get the BigchainDB code to be deployed on the nodes -BRANCH="master" - -# SSH_KEY_NAME is the name of the SSH private key file -# in $HOME/.ssh/ -# It is used for SSH communications with AWS instances. 
-SSH_KEY_NAME="not-set-yet" - -# USE_KEYPAIRS_FILE is either True or False -# Should node keypairs be read from keypairs.py? -# (If False, then the keypairs will be whatever is in the the -# BigchainDB config files in the confiles directory.) -USE_KEYPAIRS_FILE=False - -# IMAGE_ID is the Amazon Machine Image (AMI) id to use -# in all the servers/instances to be launched. -# Canonical (the company behind Ubuntu) generates many AMIs -# and you can search for one that meets your needs at: -# https://cloud-images.ubuntu.com/locator/ec2/ -# Example: At one point, if you searched for -# eu-central-1 16.04 LTS amd64 hvm:ebs-ssd -# you would get this AMI ID: -IMAGE_ID="ami-8504fdea" - -# INSTANCE_TYPE is the type of AWS instance to launch -# i.e. How many CPUs do you want? How much storage? etc. -# Examples: "t2.medium", "m3.2xlarge", "c3.8xlarge", "c4.8xlarge" -# For all options, see https://aws.amazon.com/ec2/instance-types/ -INSTANCE_TYPE="t2.medium" - -# SECURITY_GROUP is the name of the AWS security group to use. -# That security group must exist. -# Examples: "bigchaindb", "bcdbsecure" -SECURITY_GROUP="bigchaindb" - -# USING_EBS is True if you want to attach an Amazon EBS volume -USING_EBS=True - -# EBS_VOLUME_SIZE is the size of the EBS volume to attach, in GiB -# Since we assume 'gp2' volumes (for now), the possible range is 1 to 16384 -# If USING_EBS=False, EBS_VOLUME_SIZE is irrelevant and not used -EBS_VOLUME_SIZE=30 - -# EBS_OPTIMIZED is True or False, depending on whether you want -# EBS-optimized instances. See: -# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html -# Not all instance types support EBS optimization. -# Setting EBS_OPTIMIZED=True may cost more, but not always. -# If USING_EBS=False, EBS_OPTIMIZED is irrelevant and not used -EBS_OPTIMIZED=False - -# ENABLE_WEB_ADMIN is True or False, depending on whether you want -# the RethinkDB web administration console / interface to be enabled. -ENABLE_WEB_ADMIN=True - -# BIND_HTTP_TO_LOCALHOST is True or False, depending on whether -# you want the RethinkDB web interface port to be bound to localhost -# (which is more secure). See https://www.rethinkdb.com/docs/security/ -# If ENABLE_WEB_ADMIN is False, BIND_HTTP_TO_LOCALHOST is irrelevant and not used -BIND_HTTP_TO_LOCALHOST=True diff --git a/deploy-cluster-aws/fabfile.py b/deploy-cluster-aws/fabfile.py deleted file mode 100644 index 737109f9..00000000 --- a/deploy-cluster-aws/fabfile.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- -"""A Fabric fabfile with functionality to prepare, install, and configure -BigchainDB, including its storage backend (RethinkDB). -""" - -from __future__ import with_statement, unicode_literals - -from os import environ # a mapping (like a dict) -import sys - -from fabric.api import sudo, env, hosts -from fabric.api import task, parallel -from fabric.contrib.files import sed -from fabric.operations import run, put -from fabric.context_managers import settings - -from hostlist import public_dns_names -from ssh_key import ssh_key_path - -# Ignore known_hosts -# http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts -env.disable_known_hosts = True - -# What remote servers should Fabric connect to? With what usernames? 
-env.user = 'ubuntu' -env.hosts = public_dns_names - -# SSH key files to try when connecting: -# http://docs.fabfile.org/en/1.10/usage/env.html#key-filename -env.key_filename = ssh_key_path - - -###################################################################### - -# DON'T PUT @parallel -@task -def set_host(host_index): - """A helper task to change env.hosts from the - command line. It will only "stick" for the duration - of the fab command that called it. - - Args: - host_index (int): 0, 1, 2, 3, etc. - Example: - fab set_host:4 fab_task_A fab_task_B - will set env.hosts = [public_dns_names[4]] - but only for doing fab_task_A and fab_task_B - """ - env.hosts = [public_dns_names[int(host_index)]] - - -@task -def test_ssh(): - run('echo "If you see this, then SSH to a remote host worked."') - - -# Install base software -@task -@parallel -def install_base_software(): - # This deletes the dir where "apt-get update" stores the list of packages - sudo('rm -rf /var/lib/apt/lists/') - # Re-create that directory, and its subdirectory named "partial" - sudo('mkdir -p /var/lib/apt/lists/partial/') - # Repopulate the list of packages in /var/lib/apt/lists/ - # See https://tinyurl.com/zjvj9g3 - sudo('apt-get -y update') - # Configure all unpacked but unconfigured packages. - # See https://tinyurl.com/zf24hm5 - sudo('dpkg --configure -a') - # Attempt to correct a system with broken dependencies in place. - # See https://tinyurl.com/zpktd7l - sudo('apt-get -y -f install') - # For some reason, repeating the last three things makes this - # installation process more reliable... - sudo('apt-get -y update') - sudo('dpkg --configure -a') - sudo('apt-get -y -f install') - # Install the base dependencies not already installed. - sudo('apt-get -y install git g++ python3-dev libffi-dev') - sudo('apt-get -y -f install') - - -# Get an up-to-date Python 3 version of pip -@task -@parallel -def get_pip3(): - # One way: - # sudo('apt-get -y install python3-setuptools') - # sudo('easy_install3 pip') - # Another way: - sudo('apt-get -y install python3-pip') - # Upgrade pip - sudo('pip3 install --upgrade pip') - # Check the version of pip3 - run('pip3 --version') - - -# Upgrade setuptools -@task -@parallel -def upgrade_setuptools(): - sudo('pip3 install --upgrade setuptools') - - -# Prepare RethinkDB storage -@task -@parallel -def prep_rethinkdb_storage(USING_EBS): - """Prepare RethinkDB storage""" - # Convert USING_EBS from a string to a bool - USING_EBS = (USING_EBS.lower() == 'true') - - # Make the /data directory for RethinkDB data - sudo("mkdir -p /data") - - # OLD: with settings(warn_only=True): - if USING_EBS: # on /dev/xvdp - # See https://tinyurl.com/h2nut68 - sudo("mkfs -t ext4 /dev/xvdp") - sudo("mount /dev/xvdp /data") - # To mount this EBS volume on every system reboot, - # add an entry for the device to the /etc/fstab file. - # First, make a copy of the current /etc/fstab file - sudo("cp /etc/fstab /etc/fstab.orig") - # Append a line to /etc/fstab - sudo("echo '/dev/xvdp /data ext4 defaults,nofail,nobootwait 0 2' >> /etc/fstab") - # Veryify the /etc/fstab file. If something is wrong with it, - # then this should produce an error: - sudo("mount -a") - # Set the I/O scheduler for /dev/xdvp to deadline - with settings(sudo_user='root'): - sudo("echo deadline > /sys/block/xvdp/queue/scheduler") - else: # not using EBS. - # Using the "instance store" that comes with the instance. 
- # If the instance store comes with more than one volume, - # this only mounts ONE of them: /dev/xvdb - # For example, m3.2xlarge instances have /dev/xvdb and /dev/xvdc - # and /mnt is mounted on /dev/xvdb by default. - try: - sudo("umount /mnt") - sudo("mkfs -t ext4 /dev/xvdb") - sudo("mount /dev/xvdb /data") - except: - pass - sudo("rm -rf /etc/fstab") - sudo("echo 'LABEL=cloudimg-rootfs / ext4 defaults,discard 0 0' >> /etc/fstab") - sudo("echo '/dev/xvdb /data ext4 defaults,noatime 0 0' >> /etc/fstab") - # Set the I/O scheduler for /dev/xdvb to deadline - with settings(sudo_user='root'): - sudo("echo deadline > /sys/block/xvdb/queue/scheduler") - - -# Install RethinkDB -@task -@parallel -def install_rethinkdb(): - """Install RethinkDB""" - # Old way: - # sudo("echo 'deb http://download.rethinkdb.com/apt trusty main' | sudo tee /etc/apt/sources.list.d/rethinkdb.list") - # New way: (from https://www.rethinkdb.com/docs/install/ubuntu/ ) - sudo('source /etc/lsb-release && ' - 'echo "deb http://download.rethinkdb.com/apt $DISTRIB_CODENAME main" | ' - 'sudo tee /etc/apt/sources.list.d/rethinkdb.list') - sudo("wget -qO- http://download.rethinkdb.com/apt/pubkey.gpg | sudo apt-key add -") - sudo("apt-get update") - sudo("apt-get -y install rethinkdb") - # Change owner:group of the RethinkDB data directory to rethinkdb:rethinkdb - sudo('chown -R rethinkdb:rethinkdb /data') - - -# Configure RethinkDB -@task -@parallel -def configure_rethinkdb(): - """Copy the RethinkDB config file to the remote host""" - put('conf/rethinkdb.conf', - '/etc/rethinkdb/instances.d/instance1.conf', - mode=0600, - use_sudo=True) - - -# Delete RethinkDB data -@task -@parallel -def delete_rethinkdb_data(): - """Delete the contents of the RethinkDB /data directory - but not the directory itself. - """ - sudo('rm -rf /data/*') - - -# Start RethinkDB -@task -@parallel -def start_rethinkdb(): - """Start RethinkDB""" - sudo('/etc/init.d/rethinkdb restart') - - -# Install BigchainDB from PyPI -@task -@parallel -def install_bigchaindb_from_pypi(): - sudo('pip3 install bigchaindb') - - -# Install BigchainDB from a Git archive file -# named bigchaindb-archive.tar.gz -@task -@parallel -def install_bigchaindb_from_git_archive(): - put('bigchaindb-archive.tar.gz') - run('tar xvfz bigchaindb-archive.tar.gz') - sudo('pip3 install .') - # sudo('python3 setup.py install') - run('rm bigchaindb-archive.tar.gz') - - -# Configure BigchainDB -@task -@parallel -def configure_bigchaindb(): - run('bigchaindb -y configure rethinkdb', pty=False) - - -# Send the specified configuration file to -# the remote host and save it there in -# ~/.bigchaindb -# Use in conjunction with set_host() -# No @parallel -@task -def send_confile(confile): - put('confiles/' + confile, 'tempfile') - run('mv tempfile ~/.bigchaindb') - print('For this node, bigchaindb show-config says:') - run('bigchaindb show-config') - - -# Initialize BigchainDB -# i.e. create the database, the tables, -# the indexes, and the genesis block. -# (The @hosts decorator is used to make this -# task run on only one node. 
See http://tinyurl.com/h9qqf3t ) -@task -@hosts(public_dns_names[0]) -def init_bigchaindb(): - run('bigchaindb init', pty=False) - - -# Set the number of shards (in all tables) -@task -@hosts(public_dns_names[0]) -def set_shards(num_shards): - run('bigchaindb set-shards {}'.format(num_shards)) - - -# Set the number of replicas (in all tables) -@task -@hosts(public_dns_names[0]) -def set_replicas(num_replicas): - run('bigchaindb set-replicas {}'.format(num_replicas)) - - -# Start BigchainDB using screen -@task -@parallel -def start_bigchaindb(): - sudo('screen -d -m bigchaindb -y start &', pty=False) - - -# Install and run New Relic -@task -@parallel -def install_newrelic(): - newrelic_license_key = environ.get('NEWRELIC_KEY') - if newrelic_license_key is None: - sys.exit('The NEWRELIC_KEY environment variable is not set') - else: - # Andreas had this "with settings(..." line, but I'm not sure why: - # with settings(warn_only=True): - # Use the installation instructions from NewRelic: - # http://tinyurl.com/q9kyrud - # ...with some modifications - sudo("echo 'deb http://apt.newrelic.com/debian/ newrelic non-free' >> " - "/etc/apt/sources.list.d/newrelic.list") - sudo('wget -O- https://download.newrelic.com/548C16BF.gpg | ' - 'apt-key add -') - sudo('apt-get update') - sudo('apt-get -y --force-yes install newrelic-sysmond') - sudo('nrsysmond-config --set license_key=' + newrelic_license_key) - sudo('/etc/init.d/newrelic-sysmond start') - - -########################### -# Security / Firewall Stuff -########################### - -@task -def harden_sshd(): - """Security harden sshd. - """ - # Disable password authentication - sed('/etc/ssh/sshd_config', - '#PasswordAuthentication yes', - 'PasswordAuthentication no', - use_sudo=True) - # Deny root login - sed('/etc/ssh/sshd_config', - 'PermitRootLogin yes', - 'PermitRootLogin no', - use_sudo=True) - - -@task -def disable_root_login(): - """Disable `root` login for even more security. Access to `root` account - is now possible by first connecting with your dedicated maintenance - account and then running ``sudo su -``. 
- """ - sudo('passwd --lock root') - - -@task -def set_fw(): - # snmp - sudo('iptables -A INPUT -p tcp --dport 161 -j ACCEPT') - sudo('iptables -A INPUT -p udp --dport 161 -j ACCEPT') - # dns - sudo('iptables -A OUTPUT -p udp -o eth0 --dport 53 -j ACCEPT') - sudo('iptables -A INPUT -p udp -i eth0 --sport 53 -j ACCEPT') - # rethinkdb - sudo('iptables -A INPUT -p tcp --dport 28015 -j ACCEPT') - sudo('iptables -A INPUT -p udp --dport 28015 -j ACCEPT') - sudo('iptables -A INPUT -p tcp --dport 29015 -j ACCEPT') - sudo('iptables -A INPUT -p udp --dport 29015 -j ACCEPT') - sudo('iptables -A INPUT -p tcp --dport 8080 -j ACCEPT') - sudo('iptables -A INPUT -i eth0 -p tcp --dport 8080 -j DROP') - sudo('iptables -I INPUT -i eth0 -s 127.0.0.1 -p tcp --dport 8080 -j ACCEPT') - # save rules - sudo('iptables-save > /etc/sysconfig/iptables') - - -######################################################### -# Some helper-functions to handle bad behavior of cluster -######################################################### - -# rebuild indexes -@task -@parallel -def rebuild_indexes(): - run('rethinkdb index-rebuild -n 2') - - -@task -def stopdb(): - sudo('service rethinkdb stop') - - -@task -def startdb(): - sudo('service rethinkdb start') - - -@task -def restartdb(): - sudo('/etc/init.d/rethinkdb restart') diff --git a/deploy-cluster-aws/keypairs.py b/deploy-cluster-aws/keypairs.py deleted file mode 100644 index f10be7b8..00000000 --- a/deploy-cluster-aws/keypairs.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -"""A set of keypairs for use in deploying -BigchainDB servers with a predictable set of keys. -""" - -from __future__ import unicode_literals - -keypairs_list = [('E72MmhHGiwywGMHdensZreNPTtAKvRxYQEyQEqUpLvXL', - 'Ar1Xt6bpmeyNnWoBUAAi8VqLPboK84bvB417FKmxcJzp'), - ('BYupx6PLnAqcTgrqsYopKeHYjmSY5F4rpSVoFv6vK3r6', - '6UkRsEhRW7RT6WYPJkW4j4aiqLiXhpyP7H1WRj2toCv3'), - ('A3FwThyWmydgjukcpF9SmTzWQ4yoRoV9jTni1t4oicz4', - '91cuZ3GvQkEkR8UVV456fVxiujBSqd9JMp7p3XaHnVUT'), - ('CkA7fS6aGmo8JPw3yuchz31AnP7KcxncQiu3pQ81X2Mj', - 'PDuBGWm4BnSSkTTxSWVd59PAzFqidaLfFo86aTLZoub'), - ('7aoKCGN4QK82yVpErN1EJyn8ciQXBUkVBe1snx7wypEf', - 'AXoar7qdJZF2kaTJb19PhqY7iSdX3AEef7GBwb9N8WT6'), - ('1GGrwAx34CbTfaV55KdCKah2G5FThjrTdTQk3gTD97x', - '853HbGVt6hT7q17cN6CtDycuTyoncUDWscdo4GUMKntp'), - ('7C6BZbk3Xi4nB1o4mUXMty4rs22CeF8dLQ2dUKhyi9qs', - 'GVu5QTqKeMGhgz8AgzfVRYP5F3HopkqgabQhRqjujEdN'), - ('2WXPBsMGmwjMv7Eg5iqgLAq2VQW1GF6AVAWuveYLXy3Z', - 'AuBnVm277newtkgfyjGQrr3X8ykKwzVzrcpqF67muQ4N'), - ('H67sRSm8W6gVmR1N3SqWXF3WTx9Arhc1RtwvLEjmhm9t', - '5SQamPP4dWUhu2L247TMnf8vX1C5vuB3jtfh1BpVSsPg'), - ('GYztiuCLEvG4wrVszbXKs9AXbKBbDZVhw35xsq8XF63S', - '6pxa9WydnD1xRFReR1yHruXL8VtFu3c6kCNBXkwAyDXA'), - ('G7x9iHnJkjKkdLEsrV2kGZ7tFBm9wj2Pive7vRZss47', - '23MvXauT6cKMLrNyxN41jnZv83aKghLP4B37bvemjENa'), - ('3MhdzHYRrFrPmQZfXfpNKLw9xEdFNZNfUcehgBmboH43', - 'Buqfw4nFfuLoHJcfYZvxXBJf5rTm5ypSyuJfL11jDFkn'), - ('B2MbWPXDPSWQAwNirepSws7a4sgKNCVtmduGh5f54Koq', - 'Cus791pRcuoVJe24WME2QYeEAX1R4uiTGNxa3HwzwtQY'), - ('7yHSmHHX4WwsZ4H6oQxxytkGRqKPiMdqftSvRqXiomYj', - '2qVE6baeD57raXJnNwyUeWi1VyfpQ21QW1J374zMGD6o'), - ('E2V7mzxce6J8PZw8rUEZXYYVnTFRkMSfTty7duohox6V', - 'HSs1oWnvTfjrMmVouRtFJYLjfgeC1uxEiA8MX9F98A34'), - ('4yP4RH18nt3DDFzhpLGborEJuS7hx4cKaz6AAe1xNChe', - 'FziConq7CF4h6TYc1W4wYtmJbhNnrAGoareRkeoRLKTi'), - ('HGgVjtNG2U6odxbAAR31UAcHknzenY88GxYhnURy2S5C', - '82miL67GzT9fTVt8hFiE2XJBRr7iNXAvFLnuiFj5HyjV'), - ('AWY2DyCDbMQqx6v5KtcoW1f9qQd5NqiibeLFpABwibEn', - '9KgHN7xTLa34hfwGq4RpW71jsKjyPKRtaAdAvjHuATtb'), - 
('BYE1oV6Dyf49Qedrtf3UaVny9D7NEUhtx78pD1X38M6x', - '3ve8upjPmX9vvdEqvir7QBxnXQAyBWiZKwWyEhq47ptx'), - ('BiZLPsA8Q3faqLPxrcMP1TT77XUYd2jceAkuB9bFCzUa', - 'DrL1j2ZXLvBzk2TmA4DxsRmoR3oCSpW8YPvDCMCei1dU'), - ('FNPkTYojwJ4F4psnzbWt8XnNRBqRhwHXog8Pb8Kuuq7V', - 'FRxatYaiuuKBtvvERSADKNtSGPDY7iPzCmMaLDnPSuG8'), - ('2LiAeAJHjGsrkUmd42jdgPAyKEfzWVttrmnXu6rzfyYM', - 'FwQ3jTBnJpY62paSLdudyymZPUDSWy3827wY13jTJUmC'), - ('Gcu8TPtFM2tv9gtUo5yQYyBg7KHQWxnW9Dk3bp4zh5VC', - 'G3UrGxBB4UCUnxFNKrhmS1rpj3Z7bq484hZUUfNqprx1'), - ('HQGHpzMDdB3sYqfJJC5GuyreqaSjpqfoGytZL9NVtg8T', - 'GA9eu5RDuReBnjUcSSg9CK4X4A678YTrxHFxCpmWhDjM'), - ('2of61RBw2ARZcPD4XJFcnpx18Aj1mU4viUMVN2AXaJsE', - '3aDSyU3E5Kmn9emoUXrktFKH4i7t4uaKBmHNFFhErYU8'), - ('J8oF1sfJzXxJL1bDxPwCtDYw1FEh1prVAhWZF8dw1fRa', - '2atybus8CnehWNVj1DcCEidc3uD2Q7q4tiP5ok2CuNSD'), - ('AxMvjM1w3i3XQVjH8oVVGhiic9kgtvrrDzxnWKdwhdQo', - 'DXYvSgETSxy4vfU2rqPoZFumKw5aLWErkmEDR2w2sR7h'), - ('GBuyEpUQTf2v21NAKozUbUQnwwiugHNY9Uh2kPqBwqXK', - 'CLDPdckwDKa3qiLnoKuNFW8yHRjJdU37XE6skAmageJK'), - ('Bc8ykuXeq7HutQkveQfYXQ28BbFkjRpZCAEuRsAMtxuF', - 'B45qxKWDPnoE1C5KzunsMvfHmRgZHfz2LzxaM1LTqVwF'), - ('9H9v7uKAWScvy34ZQfWJW2NoJ3SWf2NuaqzabcaVuh4h', - '4Kj9wUpHKfgJbjyLNmMYwEwnotUmsgTDKMCusHbM5gcz'), - ('2kWx8nor8McDSZsg8vJ7hZrc3aUVtZhcVcvNeT14iSFo', - '3S9ase3dQd5oz3L7ELGivAsUyaTosK9C5X1aiZNtgcwi'), - ('ENEDnokpqJhziw9CPiGDCnNRwSDgnGjAPh1L7XABWP6s', - '2sUKDdtfVaUXZCN6V6WecweBL8ZEY5mCfPBTj4xzhQtq'), - ('FPUYgS4VvQ5WaZaQqnrJppBZQasoSMwZ4LyhUBKYnE6Q', - 'FtP6Zak6EEWpuptqxSoPAySfm4yA6rWAQqxMCi6s6RYp'), - ('FhQjcEjy36p27YGjKzWicdABNWzEYGciSU5Eht98o2eg', - '2hZ3Fby9K5jYQdtrhvehKTeJgq4NDJY46p4oBT7NUAv5'), - ('5JD7STAtYDUeMqvA75FxTGUw6mSFmFvnVMJZJkTHLafH', - 'HCGf4nWF7q4v4GBPXxTdWMjU7o3SifxfmKzTQ1dWmFqo'), - ('3VLPrCmUog6mBVqkTuSJzXP7ZABCA6ejQKu9LpzkJs6s', - 'Bap6iTjmZb781zLxSmELoqVA25mbMuL7B8WdAz5wDygG'), - ('EiQ57ZLHQvtLbEbiJ41ViZmPctFfd51EFEaK6Y3HZcYb', - '5uu84u8um1CfuT1pvpdFKMy5oWuU4BfWRbpRHzG4eW4A'), - ('3hM9hy2LSqe2SsrcE7XeNz1aPPPZgK5bnTeboyFgFsyj', - '3ptDB8YwcU9EiafreJnFSyfNuoKMMws7U7auMadhRxdr'), - ('3LoFwupCNbPk4cMYVS58UHtkDhvKpdYNmMJPEbK5hnat', - 'CQ56mX3agjJoWwt2uDSa7gtzHWyg3y4Lqp16rZL9qUdF'), - ('F9X1XsTcFwJN196D1PdCc88WrVrBGhfDgQnezeXW9Vjx', - '79cg39iLMZHPFbXrWo6aJAbsXFPk7kgqgBxijDbDLKA'), - ('Hf1XCRfcXc6sQZVchcvv54Sod8BjBFqsiU5Wu4eX6bTd', - '4o8pJV5jaNVqbQhw1u9EzoHT9m69bkfDSGVGugBYeiPC'), - ('2hamLVNSruGH8uT7YvXa3AUcsspg2ERqFqT11gbKpbUK', - '3SziPezcFQbhPrGVJrm5D8HVAZSjduBhFanaXBZcGr3s'), - ('6u92HEbihHiorTANWBs5nYsHJSJ21SfSqsD4FwZy8UZr', - '9jo5yogiEVYwxCkzYoHrn7WMnxpRqqJxbAFuMA2TuzmW'), - ('4YJJNsfEz3eiBE48w8kihENuwDXGbS1vYLi27663EDvw', - 'xcAieBttVYi8g9NQBBjf9jPoaMoWx3hA1h3iCcB11jC'), - ('CUSUaZiUyy8f9yf59RSeorwQJGGnVgR6humfvmzpBMmS', - 'EbR1dthGhu82wPJT7MmqKu84eKNKQXEuUm6Lqdf4NLXu'), - ('5RBfhrADkYu5yFKtgdVZPq1k78VcQc3VZr4kjWpXmACs', - 'Ev4PviNfb87KH5HSXEj7gN7uBYLbHWFSFqQPsoYcMHK7'), - ('4M4UiTmPLY6H4AhSkgUkKQ6cRceixyL6oT86AUvK9tTs', - '4VuGTkZ62PbgKEotnMtyG6L2M76v3qabhPZKXeJ1npca'), - ('BDAWs8i2GbRySDC5SCStzFdEvnfiqCTEbu9mpZRoKdA8', - 'FoyMqh9tcY6xCyLxdByrW8xgzAqGNJKR9dPEN7CjPmQ2'), - ('Dm1HwCxzLm76hBGAG2NEziNRiPBiYnQoKivPm5JC3477', - 'Ap747d6xaUofhsFoyXbg7SCpH53tsD8zbLY39QS2jWfC'), - ('6dRpaKGL3pzaoBX1dKsduuDkkPfUB1yBb1taCYZoNGw2', - '7PoRrQTBXmCkKuwvLxQceBbUwqo4eReNTxVaGVT6npdn'), - ('Cb6sghYERbQA5VMdxKiZx1xk6j6heJNbW1TxRTMwkquu', - 'Am8zvPbAgk2ERqmhGzJZL1NCNkEUjF6enXCwczp4d97B'), - ('EhaLhpbbRCfCuLbq3rQS1d4PfE6rHgvYA9MpTGaxACgW', - 'EfeeApbq1jBChfhe13JkEPbUfm1JYYFCdKXdtue6YrH5'), - ('353aMTUrjH628XzVnKH2oyRmMaAdJ4antn5fGNAzfqMN', - 
'AqustPmyDtVpFDiUEqWfFjDeVBQhvKYZFU4wjfpXRXee'), - ('7x8v2BEkdyDvzVzbRJR9AztZHLv8kUZfwRRmcPEpHEYj', - '88MTxTfy7Btqxwdf5Xo7TmjzACeuNop8MeE63LikQn4k'), - ('2jnPZg4oeBzbqL6TdpyTdoxraqjWHqfSrzfmS5Qh8D4V', - '3GSJUg4s6ydymn9obTxdSVBkxpmWZLCGuvBK9fMECefe'), - ('N8DS5DA18i2Bh7rEz7nJSSJZycz8qKaNkPP58BCh7Zr', - 'AKjy7whpaoUnbDJXzNHbtBHawWnS7tLha3nfMPXh4Qam'), - ('DUQ3pGX5XQtvucPDcNEQPMLrqCMxCbRBuWmHHddNg83Q', - 'F3vakqePy8xmpb23psZahDVEdu4dywCPQB7fCMsP5mp3'), - ('6ABw5HQZSWWJr2Ud6KmD73azu732iNTvEfWbCotCFLrn', - 'GW9eq8JgkHDLjtENDscTK5Bj9AAC3css7SPxLZCPcS2V'), - ('ByNJL8Eo8B6kKH5UuJxiXBRRrAKfALLvQmt2Rq5JgAA4', - 'GEtT15SrZUDxVpLjS4metu4BXYw4o1NmxzH5Wr2DcqAv'), - ('F9XaoqP4A4zZoPB6phfTP8i7CQsnSakh6bk8B1CTLwqy', - '9XLZaFGco78AXQS9dmHZ6zypjtg1Z33pj4KoTtDmnLa6'), - ('ESamPv9kb87uEBZjfgarKTQwSTcEQMetBH44b8o3mPZC', - 'Nv7eXkgL84o8fQLjagN1bVj7bt5FKF6Ah1Md6WWwyLk'), - ('E43hqzYjZZ1XRSME6d37Q99UifT4d23piq1CN3fMp6cv', - 'HLMB1uPdRuYcQyM9UmY9zerxQa3cYqEaRUku3h9oRBQn'), - ('3qfPXUTeCsVRk9L68uyGF5u3XxFSVBtPkATtHayVgCGs', - 'ZEkiCeoj3FGfudrN4xgtbz1VihkKWm4cgHN9qJ4p4GH'), - ('7fxCmzKhvNGpbn9U2vih9N1aF5UXaVER6NSpwn3HPpoy', - 'CmhLU67kWqbL2kyj8bA5TNcg7HiQFJeamcJhe5BB1PAx'), - ('BhJsfuvhj9PqfvnvNGQX26fR5SXvcq7JdhhrWyZHoXT9', - 'CgMqrhrjr4mBMvTgiHLqgvf4tRzUpZuLtQnMSG1Jjgx2'), - ('GZbkL2W22Z2YwHf5SBwRfSEYQf1tquPkELDQjkwm2yU4', - 'E47ijUUheN1Zz8TWKmbcDDWz5nduBvZNtcgqbGRiiGv6'), - ('9Puc7H9PRHZ2oowzxiuGueZCzNY1X3aSuopy7k4w8TTo', - 'FTjTVxsPjiNw6TnbwBeE7WpZbvJuVEMwbdPCt1NppHhc'), - ('BczGQKaQNu8QkTc4PWmPdrbLfmXFzAqnoJ9YzHTU1vez', - '4m4xe8fjWAFHyNYLMRYDXskG2d5o9xZxgzCzca23uBBH'), - ('BZwZrE1hNzKzfnbThE9MiB5Movox67c7uGJmi9Nhef1j', - '5G6reNxH3e1gyMSgBRCYQJypFtTSBQ85r5fQGw6DfnpM'), - ('DFJxcvaR5Xk2bHiuxZzaqDxLDSq6fGSUdg54c5zAFLKz', - 'BRL9LWweehDAcEPc8MXjd3uQtAt4ZK1LY8J5KT3GeYKm'), - ('5wfyCc1mAhp2DCKMmEQG9nW4bKfaVkk8kpjuerApiFXv', - 'rdqo7bdePrF6wR8v8dzJopEHgqNgt2yNmMjxz6wMguQ'), - ('8S42sTQQqr5LJTa6jBjCfNg6xvjeL95btPJt2MPHBrDo', - '7VJjwATaownwJyTWXJxKEtJk46eEXTm9UaioPvVFD2zD'), - ('57WwYQgHHSu7SYrXXmovjiPDmc2BB25itp6xSu5KrQQn', - 'FGW86z4ymEbtqiSpp6zzpDkzdPZv9xDMCGUdGVBz8KLU'), - ('CcxnCDQ4JgH2ceTEPW75GcfW8rP7aiAT8ZuEtYbqEa7w', - '7kQdXRZNJaWo7Gj4XtT1fV4LD4ZtN8VmxdZFiJE8q8xF'), - ('8CYTgLp2kbVJKqnadQNGZorWcdWNpbaXrt6kvdzJnEjv', - '57Zwyf4FUEWTxEWrmbSb6vrcZBukHmCs7TKzKoygV6cf'), - ('4buY9tDvVRpTjfAjM8Up4vWw5yh37xWteSMAerbxpKpv', - '5FvFDCSZgtc57hSpvBqBd8VjhyAJ2a2vxTiHzg2nPyg9'), - ('5jJ8hry8Pu7rkgKkWcmZzfZ5FWk6rT3TnYGesEhfijvt', - '7hmVhrQ8vmHmNhxyvyW1cHF5N6gzRoBy7kimfj4b2uZ5'), - ('6MUnCTEZFZvsKTCW4NKDiPv4a3SRWZLS7gUNP4nXsFBh', - '5m2oXtepVwbKt9t5er72bFNioiHYMRtHcUu176DVFBQu'), - ('GXuU171dpf8JpBLiVgXksyXrdkqDqm6AWJ5A2JZLkkwV', - 'BF6xtHg3kcBKHCJ9Y6TTPyGYn3MDKLqxVDshVUbgaCAk'), - ('DoRUYrhULJbAnsGot4hYZeHUaFgXj4hwhHiGRUP3rZCj', - '8i67E6uPyrRvAN5WqSX9V7xeSGr4nPXqAgnR2pPQj3ew'), - ('At4gvM1wZt6ACte2o26Yxbn5qaodMZBd7U1WsiBwB42x', - 'GBPGPkSkkcM4KmJRqKjCPiEygVLW8LmRRarYvj967fbV'), - ('48D3mw2apqVQa6KYCjGqDFiG5cbwqZEotL2w8aPWCxtE', - '2Byvg9DGK7Axk9Bp6bmiUoBQkkLsRNrcoq2ZPZu5ZyGg'), - ('2YncoUMac2tNMcRFEGEgvuDXb58RdyqHMWmSN2QTMiCP', - 'BSNXYAX8Em2TjuCDvmLV3GgnxYT6vX68YFwoGPaPpsSa'), - ('7As7DVaC6FBqojvFoTo9jgZTcTGx9QYdVwUhNSNNvUsz', - 'E5cMypehm8j2Zxw3dCXKc1MGTCftJJm9FzqPNwnVEgQn'), - ('AAwj9V5iW88EwoZpLwuRvqwKn8c8rSgKAyZduFAfvqvV', - 'CkTks2ZGnQdM19wucQMehPe1imANjnRAAfLd1uewGUx8'), - ('axH9mijksto4vnoTjbbJJfY8iBFziJL2y39ago41WNM', - 'GJV8hxcjpieuXmVz9j5JjB2eFLXmRoBF7EYWpbMNKq7Q'), - ('6vv2FyJcTNJRnEmsRgHai5Bf7trJ8CsBMqbZckXWkbGk', - '5YXtgt3ZVKKYM3qvHXXKKSpStfH38akRYvg9shNNChWS'), - 
('DKK6kfAGnLV1mowm9m52yYFViVbQfVEtmRuveiXbnC93', - 'YvrVGNzxXSTLQ5QQJ3GHWHDQJnd3qJ5npGQQvZtb4m1'), - ('4QWSQeeu9oQA3ZQG7d6LKzZLR3YZ79q999Zzb7hb2cbh', - '42ARr6nFsZXLAgGGwZ5p55kVSW5ETjrnJBUxaV6sFmzk'), - ('43oJ9CvF3Wsymj8zrkC19VfzjMiwntw3AXrTvc2UFuuf', - 'A661APGeLXuLgYUwmQjKWnuz1XmjuLNW8XVGuGjmEm76'), - ('3uN8UwhNcg219uX1GffC3a9tCZrVY327ZUk5rs3YfAR2', - 'Ca5B2Z9PAeBkEPuYeUyvs3dHhTqpAzFuXERfHZT3zxto'), - ('HuV5FPtboYQe2EEVFVhBkjRxbUBjeBCHRk2VuiNnBS7N', - '5AJCbvgfLmdGdWKjLpDBZtrrJC6NNCQJK5P9NmpvbByy'), - ('2Rbr8Lasv1CDhL2Xxu5ZfLHf4fhCfxuTr25YDB2Q5VXN', - 'FQTbtsHjw1oYyKF3pUamwubB27UqG1ista1ezL2kgF3N'), - ('CLGF2xs7YyJrNZ8ertsPwofzqTBfQiJ5cMiRNcMjgEkh', - '4uSue7UmSr1H8QCYrerRRyUh2BTqX5t5qPWRdVrcyL43'), - ('o6jUu8mqTQMaawxRBbvuWd3b7syXYEUPFWJGuNuoDs6', - '7uJuBMMZD3d6mq2ihUtJQLWsAqACAkmQSJ3gUcEgW18W'), - ('2wo2o5rqEEyijwm4MuCXHNVp2oJPEYQBF2eU6CoXYuVy', - 'AZY2HCpLGjsUgKo7PZ6gdx6btReR6gRCeE9gmzebgGZ2'), - ('Eo1z9xyGbHZxH1ezG7iLxJFhuL8YWJ6NREu4T2VtRZky', - 'GbjDtbwPBf6pcczRbANBvHeBNb3obMtEMoQTxmmafq2g'), - ('8oPaUg1Wc7293c8HR7Phs4m4DvzDjYuzFUBqffJUhJKP', - '9vJKX3jgc1K4sdhnVYLhU6iv7vf8mRygRDYr784mYUpp'), - ('K2BCZLghAwL4Y9eiPboQM2sz4GWYFM5WApZT6firnig', - '7j9QMXcyqgeVFejyNMhXszKAbZuNdECFYwZFDNCwHN3V'), - ('Dz8Ft3YeeuMcsPKMWNqDDbdx6Qo2s2H2cZNUoX2uDwgY', - '3HqEP9EvU9852orfSh9WZd54pDMJnT5nMnGkjhZibbZg'), - ('3cq9D9s9vZgyDertxiZr21etinCYKCMYcf3LXe3o8zT4', - '5174KhHkMsti7XNSYh5j1jFEv22PHQQizTXxT7gT2ZPb'), - ('5uJwmzmoZDkADaeyenBvceP4mSzBgEgbqU5cc8JQpTDE', - 'HEYiTYWaTwjXkzfbE4eZ1RL78ciJkWqEio8tDTvCXzk8'), - ('BkHzLwC5bkLVB4b5KPAqbWc4ekhqmMtk34tfYpLQ53KR', - '537uFsVdCU81kSG7eUZBFV5q3PvadsS4KgzaLuGWGzgG'), - ('3eQT6nC2BEqtXa5b5dn51cJEpj4eMHYsx7RkHXfwNEkq', - '2NV1QhXppRfj19ZemqGUgxZ9Pd5yD13aQmrcNd6g25D4'), - ('GsBGHmKMiJoYDhoXJXwUnkbH4cVWWQ7emG1t7vTFDdS', - 'CsLyGG9J9E4ZLwhpTHRgp21tvGWyPj79SaLGEpqVhHKj'), - ('ALytZ6ygpy3hqHVXGHHdNuzuQh1hSoTVU8im5C6CgTR2', - '5646BEZkpyoDWQHMscMav8bXoiAzf7giVmu8yepWsoMN'), - ('5XhJnzEfqVRM6trhL19K1AoGAQjbWC84Cv5XZ4nE9fF7', - 'BJdQwVTx2fuJWkStt3yPD2WUeopjV3yPQp1646Yi2pXL'), - ('7XLiDAjnggSU7PAvrTwsyPebC3bhuc5B2CMdiYAQBGWZ', - '8xnXGiNp1ADNfuG6uLQ91h2h1ekjuiEC5SRdw19rbpnq'), - ('7kyFUtCcaiWKfGZmWfb9kvwcYLxxmocBC7qXYwNwotgV', - '574EqNs3exLKJxgqFxKyLE5XQMBkadQf5MKQ8qpjsVJS'), - ('ESJSEPbWb13NaDkde8rEdcippc58AMCZodfmJP1SK16m', - '5iwWfDDjgyFfeLpS9EYmwszScwtxTACcgAbinCjFLZTZ'), - ('AjnWLT2vZnEmLfioGeseLuxGQFFiyoqtFJj2oEUgzax5', - '9JeUGkGHPyB7s7XVVik1aFyCxarH2tWhpSJapnRXveb8'), - ('32yM1jbRpZt7EjnH2UDimusAPfMQ83Wd1AULxLYMv2hq', - '73v6uEUhL12MEwdfFFDmqbWmSQXoC8Y3VPB9vKUYEW5X'), - ('F5DjMdHvqqym53MtBG1v9shrza74EttHn1zPFL1ic1hT', - 'FpkXbvZsW4LbU4XZYvy6euR7F9SxDMPdyVVCfJFUaT2C'), - ('3EPdMUSAXFuQLaVwq1fPHNUPzvSHXqfNupgu6kGhdEVc', - '28RxZbx71Y8ZaYt9f9D2HnAhkH2CvAPT4PXpDgCnXhVY'), - ('47YXW4Escn71q7xf6qip8NwdKTq2ScL1i4xmAnJ1RvDW', - '3NQxT4ukLvPPZV3J6qDmx5PFPa7GvaiMBwc1r47SXdfj'), - ('AiCfcc6viFsxTxfEJxo82b3GWzim2nRXvBBfB14w4dMr', - 'FBCcBLpFUss64MWjf3nuSRrLNoqnWpJGfXKJVaduPezJ'), - ('CkeGi1XM3nquJcp3osb2EhTJ99gsisPfTpnsQdYViWWa', - '4L12aHJtN96XGrYbhBFhmEQuPTnsHu95NATsz3X2Uo4Z'), - ('A78PS3MuQtWQ937ow5mzHhXUS1LNSzX2nMcmqLN57c3G', - '87T6viSDWX7Rrw2VWsqEXhwVmrsrmf2rjDHRkeUGU4rX'), - ('2SzYHP21J4KXwVgSwtNfDQKUbyC7RE8feAwfVuW7PSmD', - '4NCA5NxnhxPAAcWqyxtg4us7MJYSbn8g3Kw6v35Vmnm5'), - ('GxGuWY5A1ADiXFrdCiAcVJX4foveGxDfhcJd2Yirg3D8', - '2Jjo3w5gQ7TsQaN2N7iNejfGLjzucaNg4hYZBcwT7AzC'), - ('5dYeKTvxfH6s9Esbys8TVMDTZMCzjFJAH4xe623ykmZ2', - '5q7Le5Kcm1eBY1r8XwEseDXnEUKkZE5qtNb6p5BSSKwz'), - ('EkbeQ7eoiHxiTmq7ksw6FLvf59b3pGuoDR9LF29KYw4m', - 
'CDpJ8VmgiBvYUcZMcPYr3B5UxSVEtLxRfq5dH3AxboNT'), - ('2zXT2EUMwWKPMWHK5rYvxgLNdmkoedXH754uzUBphaCE', - '5oHnEFaUaM1QRZjV48K1DrqKeEdcbmb8uG2zucTYc5qH'), - ('H6c78e97srwPEg5PsW1uuKAovSxTvmNyFt9qJwoeJP4y', - 'inwncuMiPRuw6PEucVG2Kempk91yq3dT5kpuf3Umf4j'), - ('6yJDrenNeRBpdQxqxMY3C2V6cBrfvpzYpz6MbefxuxsZ', - 'CnCjmTECDrqJP5nTPSL2NWJ9LPyyFzLmrTYiRcSjwU7e'), - ('3YTX3ntzsjG9CxbkCayToGEzmn1Fgdvw1W8gefCUTa9L', - 'FkCbQBoKRZbndsNP44CWheEchwPC65UNdrZ8FntRTyvu'), - ('8Y7xgZ5M8qBYdX5iCHe7mPQ6ZcQNXDJd28ZVDdx7FSBa', - 'AYTdxj598H36RGmBzEnR4QK8pVF6k5YTRBypxWsDkXUB'), - ('AtzLLpKuPehdP4g6x4J4BH2RjNbvXewxf8ibSgKSiJtL', - 'vC8C3u71YueJcUhtyfn9Xx5PjpJuizDZNGW23tFb5VY'), - ] diff --git a/deploy-cluster-aws/launch_ec2_nodes.py b/deploy-cluster-aws/launch_ec2_nodes.py deleted file mode 100644 index 1c50f895..00000000 --- a/deploy-cluster-aws/launch_ec2_nodes.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- -"""This script: -0. allocates more elastic IP addresses if necessary, -1. launches the specified number of nodes (instances) on Amazon EC2, -2. tags them with the specified tag, -3. waits until those instances exist and are running, -4. for each instance, it associates an elastic IP address - with that instance, -5. writes the shellscript add2known_hosts.sh -6. (over)writes a file named hostlist.py - containing a list of all public DNS names. -7. (over)writes a file named ssh_key.py - containing the location of the private SSH key file. -""" - -from __future__ import unicode_literals -from os.path import expanduser -import sys -import time -import socket -import argparse -import importlib -import botocore -import boto3 - -from awscommon import get_naeips - - -SETTINGS = ['NUM_NODES', 'BRANCH', 'SSH_KEY_NAME', - 'USE_KEYPAIRS_FILE', 'IMAGE_ID', 'INSTANCE_TYPE', 'SECURITY_GROUP', - 'USING_EBS', 'EBS_VOLUME_SIZE', 'EBS_OPTIMIZED', - 'ENABLE_WEB_ADMIN', 'BIND_HTTP_TO_LOCALHOST'] - - -class SettingsTypeError(TypeError): - pass - - -# Ensure they're using Python 2.5-2.7 -pyver = sys.version_info -major = pyver[0] -minor = pyver[1] -print('You are in an environment where "python" is Python {}.{}'. - format(major, minor)) -if not ((major == 2) and (minor >= 5) and (minor <= 7)): - print('but Fabric only works with Python 2.5-2.7') - sys.exit(1) - -# Parse the command-line arguments -parser = argparse.ArgumentParser() -parser.add_argument("--tag", - help="tag to add to all launched instances on AWS", - required=True) -parser.add_argument("--deploy-conf-file", - help="AWS deployment configuration file", - required=True) -args = parser.parse_args() -tag = args.tag -deploy_conf_file = args.deploy_conf_file - -# Import all the variables set in the AWS deployment configuration file -# (Remove the '.py' from the end of deploy_conf_file.) 
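# (For example, passing --deploy-conf-file example_deploy_conf.py results
# in importlib.import_module('example_deploy_conf').)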
-cf = importlib.import_module(deploy_conf_file[:-3]) - -dir_cf = dir(cf) # = a list of the attributes of cf -for setting in SETTINGS: - if setting not in dir_cf: - sys.exit('{} was not set '.format(setting) + - 'in the specified AWS deployment ' - 'configuration file {}'.format(deploy_conf_file)) - exec('{0} = cf.{0}'.format(setting)) - -# Validate the variables set in the AWS deployment configuration file -if not isinstance(NUM_NODES, int): - raise SettingsTypeError('NUM_NODES should be an int') - -if not isinstance(BRANCH, str): - raise SettingsTypeError('BRANCH should be a string') - -if not isinstance(SSH_KEY_NAME, str): - raise SettingsTypeError('SSH_KEY_NAME should be a string') - -if not isinstance(USE_KEYPAIRS_FILE, bool): - msg = 'USE_KEYPAIRS_FILE should be a boolean (True or False)' - raise SettingsTypeError(msg) - -if not isinstance(IMAGE_ID, str): - raise SettingsTypeError('IMAGE_ID should be a string') - -if not isinstance(INSTANCE_TYPE, str): - raise SettingsTypeError('INSTANCE_TYPE should be a string') - -if not isinstance(SECURITY_GROUP, str): - raise SettingsTypeError('SECURITY_GROUP should be a string') - -if not isinstance(USING_EBS, bool): - raise SettingsTypeError('USING_EBS should be a boolean (True or False)') - -if not isinstance(EBS_VOLUME_SIZE, int): - raise SettingsTypeError('EBS_VOLUME_SIZE should be an int') - -if not isinstance(EBS_OPTIMIZED, bool): - raise SettingsTypeError('EBS_OPTIMIZED should be a boolean (True or False)') - -if not isinstance(ENABLE_WEB_ADMIN, bool): - raise SettingsTypeError('ENABLE_WEB_ADMIN should be a boolean (True or False)') - -if not isinstance(BIND_HTTP_TO_LOCALHOST, bool): - raise SettingsTypeError('BIND_HTTP_TO_LOCALHOST should be a boolean ' - '(True or False)') - -if NUM_NODES > 64: - raise ValueError('NUM_NODES should be less than or equal to 64. ' - 'The AWS deployment configuration file sets it to {}'. - format(NUM_NODES)) - -if SSH_KEY_NAME in ['not-set-yet', '', None]: - raise ValueError('SSH_KEY_NAME should be set. ' - 'The AWS deployment configuration file sets it to {}'. - format(SSH_KEY_NAME)) - -# Since we assume 'gp2' volumes (for now), the possible range is 1 to 16384 -if EBS_VOLUME_SIZE > 16384: - raise ValueError('EBS_VOLUME_SIZE should be <= 16384. ' - 'The AWS deployment configuration file sets it to {}'. - format(EBS_VOLUME_SIZE)) - -# Get an AWS EC2 "resource" -# See http://boto3.readthedocs.org/en/latest/guide/resources.html -ec2 = boto3.resource(service_name='ec2') - -# Create a client from the EC2 resource -# See http://boto3.readthedocs.org/en/latest/guide/clients.html -client = ec2.meta.client - -# Ensure they don't already have some instances with the specified tag -# Get a list of all instances with the specified tag. -# (Technically, instances_with_tag is an ec2.instancesCollection.) -filters = [{'Name': 'tag:Name', 'Values': [tag]}] -instances_with_tag = ec2.instances.filter(Filters=filters) -# len() doesn't work on instances_with_tag. This does: -num_ins = 0 -for instance in instances_with_tag: - num_ins += 1 -if num_ins != 0: - print('You already have {} instances with the tag {} on EC2.'. 
- format(num_ins, tag)) - print('You should either pick a different tag or ' - 'terminate all those instances and ' - 'wait until they vanish from your EC2 Console.') - sys.exit(1) - -# Before launching any instances, make sure they have sufficient -# allocated-but-unassociated EC2 elastic IP addresses -print('Checking if you have enough allocated-but-unassociated ' + - 'EC2 elastic IP addresses...') - -non_associated_eips = get_naeips(client) - -print('You have {} allocated elastic IPs which are ' - 'not already associated with instances'. - format(len(non_associated_eips))) - -if NUM_NODES > len(non_associated_eips): - num_eips_to_allocate = NUM_NODES - len(non_associated_eips) - print('You want to launch {} instances'. - format(NUM_NODES)) - print('so {} more elastic IPs must be allocated'. - format(num_eips_to_allocate)) - for _ in range(num_eips_to_allocate): - try: - # Allocate an elastic IP address - # response is a dict. See http://tinyurl.com/z2n7u9k - response = client.allocate_address(DryRun=False, Domain='standard') - except botocore.exceptions.ClientError: - print('Something went wrong when allocating an ' - 'EC2 elastic IP address on EC2. ' - 'Maybe you are already at the maximum number allowed ' - 'by your AWS account? More details:') - raise - except: - print('Unexpected error:') - raise - -print('Commencing launch of {} instances on Amazon EC2...'. - format(NUM_NODES)) - -sg_list = [SECURITY_GROUP] - -for _ in range(NUM_NODES): - # Request the launch of one instance at a time - # (so list_of_instances should contain only one item) - # See https://tinyurl.com/hbjewbb - if USING_EBS: - dm = { - 'DeviceName': '/dev/sdp', - # Why /dev/sdp? See https://tinyurl.com/z2zqm6n - 'Ebs': { - 'VolumeSize': EBS_VOLUME_SIZE, # GiB - 'DeleteOnTermination': False, - 'VolumeType': 'gp2', - 'Encrypted': False - }, - # 'NoDevice': 'device' - # Suppresses the specified device included - # in the block device mapping of the AMI. - } - list_of_instances = ec2.create_instances( - ImageId=IMAGE_ID, - MinCount=1, - MaxCount=1, - KeyName=SSH_KEY_NAME, - InstanceType=INSTANCE_TYPE, - SecurityGroupIds=sg_list, - BlockDeviceMappings=[dm], - EbsOptimized=EBS_OPTIMIZED - ) - else: # not USING_EBS - list_of_instances = ec2.create_instances( - ImageId=IMAGE_ID, - MinCount=1, - MaxCount=1, - KeyName=SSH_KEY_NAME, - InstanceType=INSTANCE_TYPE, - SecurityGroupIds=sg_list - ) - - # Tag the just-launched instances (should be just one) - for instance in list_of_instances: - time.sleep(5) - instance.create_tags(Tags=[{'Key': 'Name', 'Value': tag}]) - -# Get a list of all instances with the specified tag. -# (Technically, instances_with_tag is an ec2.instancesCollection.) -filters = [{'Name': 'tag:Name', 'Values': [tag]}] -instances_with_tag = ec2.instances.filter(Filters=filters) -print('The launched instances will have these ids:'.format(tag)) -for instance in instances_with_tag: - print(instance.id) - -print('Waiting until all those instances exist...') -for instance in instances_with_tag: - instance.wait_until_exists() - -print('Waiting until all those instances are running...') -for instance in instances_with_tag: - instance.wait_until_running() - -print('Associating allocated-but-unassociated elastic IPs ' + - 'with the instances...') - -# Get a list of elastic IPs which are allocated but -# not associated with any instances. -# There should be enough because we checked earlier and -# allocated more if necessary. 
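# (Each item returned by get_naeips() is an address dict from EC2's
# describe_addresses response, roughly:
#     {'PublicIp': '203.0.113.7', 'Domain': 'standard'}
# plus an 'AllocationId' key when Domain == 'vpc'; see the indexing
# below and in release_eips.py. The IP above is an illustrative
# RFC 5737 address, not real output.)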
-non_associated_eips_2 = get_naeips(client)
-
-for i, instance in enumerate(instances_with_tag):
-    print('Grabbing an allocated but non-associated elastic IP...')
-    eip = non_associated_eips_2[i]
-    public_ip = eip['PublicIp']
-    print('The public IP address {}'.format(public_ip))
-
-    # Associate that Elastic IP address with an instance
-    response2 = client.associate_address(
-        DryRun=False,
-        InstanceId=instance.instance_id,
-        PublicIp=public_ip
-        )
-    print('was associated with the instance with id {}'.
-          format(instance.instance_id))
-
-# Get a list of the public DNS names of the instances_with_tag
-public_dns_names = []
-for instance in instances_with_tag:
-    public_dns_name = getattr(instance, 'public_dns_name', None)
-    if public_dns_name is not None:
-        public_dns_names.append(public_dns_name)
-
-# Write a shellscript to add remote keys to ~/.ssh/known_hosts
-print('Preparing shellscript to add remote keys to known_hosts')
-with open('add2known_hosts.sh', 'w') as f:
-    f.write('#!/bin/bash\n')
-    for public_dns_name in public_dns_names:
-        f.write('ssh-keyscan ' + public_dns_name + ' >> ~/.ssh/known_hosts\n')
-
-# Create a file named hostlist.py containing public_dns_names.
-# If a hostlist.py already exists, it will be overwritten.
-print('Writing hostlist.py')
-with open('hostlist.py', 'w') as f:
-    f.write('# -*- coding: utf-8 -*-\n')
-    f.write('"""A list of the public DNS names of all the nodes in this\n')
-    f.write('BigchainDB cluster.\n')
-    f.write('"""\n')
-    f.write('\n')
-    f.write('from __future__ import unicode_literals\n')
-    f.write('\n')
-    f.write('public_dns_names = {}\n'.format(public_dns_names))
-
-# Create a file named ssh_key.py
-# containing the location of the private SSH key file.
-# If a ssh_key.py already exists, it will be overwritten.
-print('Writing ssh_key.py')
-with open('ssh_key.py', 'w') as f:
-    f.write('# -*- coding: utf-8 -*-\n')
-    f.write('"""This file exists as a convenient way for Fabric to get\n')
-    f.write('the location of the private SSH key file.\n')
-    f.write('"""\n')
-    f.write('\n')
-    f.write('from __future__ import unicode_literals\n')
-    f.write('\n')
-    home = expanduser('~')
-    f.write('ssh_key_path = "{}/.ssh/{}"\n'.format(home, SSH_KEY_NAME))
-
-# For each node in the cluster, check port 22 (ssh) until it's reachable
-for instance in instances_with_tag:
-    ip_address = instance.public_ip_address
-    print('Attempting to connect to {} on port 22 (ssh)...'.
-          format(ip_address))
-    unreachable = True
-    while unreachable:
-        # Create a socket
-        # Address Family: AF_INET (means IPv4)
-        # Type: SOCK_STREAM (means connection-oriented TCP protocol)
-        # (A fresh socket is created for each attempt, because a socket
-        # whose connect() has failed can't reliably be reused.)
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        try:
-            # Open a connection to the remote node on port 22
-            s.connect((ip_address, 22))
-        except socket.error as e:
-            print('  Socket error: {}'.format(e))
-            print('  Trying again in 3 seconds')
-            s.close()
-            time.sleep(3.0)
-        else:
-            print('  Port 22 is reachable!')
-            s.shutdown(socket.SHUT_WR)
-            s.close()
-            unreachable = False
diff --git a/deploy-cluster-aws/make_confiles.sh b/deploy-cluster-aws/make_confiles.sh
deleted file mode 100755
index 052ecaf0..00000000
--- a/deploy-cluster-aws/make_confiles.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
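-# Usage: ./make_confiles.sh <dir> <number>
-# Creates <dir> (removing and recreating it if it already exists) and
-# writes <number> default BigchainDB config files into it:
-# bcdb_conf0, bcdb_conf1, ...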
-
-set -euo pipefail
-
-function printErr()
-    {
-        echo "usage: ./make_confiles.sh <dir> <number>"
-        echo "No argument $1 supplied"
-    }
-
-if [ -z "$1" ]; then
-    printErr "<dir>"
-    exit 1
-fi
-
-if [ -z "$2" ]; then
-    printErr "<number>"
-    exit 1
-fi
-
-CONFDIR=$1
-NUMFILES=$2
-
-# If $CONFDIR exists, remove it
-if [ -d "$CONFDIR" ]; then
-    rm -rf $CONFDIR
-fi
-
-# Create $CONFDIR
-mkdir $CONFDIR
-
-# Use the bigchaindb configure command to create
-# $NUMFILES BigchainDB config files in $CONFDIR
-for (( i=0; i<$NUMFILES; i++ )); do
-    CONPATH=$CONFDIR"/bcdb_conf"$i
-    echo "Writing "$CONPATH
-    bigchaindb -y -c $CONPATH configure rethinkdb
-done
diff --git a/deploy-cluster-aws/release_eips.py b/deploy-cluster-aws/release_eips.py
deleted file mode 100644
index a3e1f855..00000000
--- a/deploy-cluster-aws/release_eips.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Release all allocated but non-associated elastic IP addresses
-(EIPs). Why? From the AWS docs:
-
-``To ensure efficient use of Elastic IP addresses, we impose a small
-hourly charge if an Elastic IP address is not associated with a
-running instance, or if it is associated with a stopped instance or
-an unattached network interface. While your instance is running,
-you are not charged for one Elastic IP address associated with the
-instance, but you are charged for any additional Elastic IP
-addresses associated with the instance. For more information, see
-Amazon EC2 Pricing.''
-
-Source: http://tinyurl.com/ozhxatx
-"""
-
-from __future__ import unicode_literals
-import boto3
-from awscommon import get_naeips
-
-# Get an AWS EC2 "resource"
-# See http://boto3.readthedocs.org/en/latest/guide/resources.html
-ec2 = boto3.resource(service_name='ec2')
-
-# Create a client from the EC2 resource
-# See http://boto3.readthedocs.org/en/latest/guide/clients.html
-client = ec2.meta.client
-
-non_associated_eips = get_naeips(client)
-
-print('You have {} allocated elastic IPs which are '
-      'not associated with instances'.
-      format(len(non_associated_eips)))
-
-for i, eip in enumerate(non_associated_eips):
-    public_ip = eip['PublicIp']
-    print('{}: Releasing {}'.format(i, public_ip))
-    domain = eip['Domain']
-    print('(It has Domain = {}.)'.format(domain))
-    if domain == 'vpc':
-        client.release_address(AllocationId=eip['AllocationId'])
-    else:
-        client.release_address(PublicIp=public_ip)
diff --git a/deploy-cluster-aws/write_keypairs_file.py b/deploy-cluster-aws/write_keypairs_file.py
deleted file mode 100644
index d2fda508..00000000
--- a/deploy-cluster-aws/write_keypairs_file.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""A Python 3 script to write a file with a specified number
-of keypairs, using bigchaindb.common.crypto.generate_key_pair().
-The written file is always named keypairs.py and it should be
-interpreted as a Python 2 script.
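-(The scripts that later import keypairs.py run under Python 2;
-see 'Using the list in other Python scripts' below.)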
-
-Usage:
-    $ python3 write_keypairs_file.py num_pairs
-
-Using the list in other Python scripts:
-    # in a Python 2 script:
-    from keypairs import keypairs_list
-    # keypairs_list is a list of (sk, pk) tuples
-    # sk = private key
-    # pk = public key
-"""
-
-import argparse
-
-from bigchaindb.common import crypto
-
-
-# Parse the command-line arguments
-desc = 'Write a set of keypairs to keypairs.py'
-parser = argparse.ArgumentParser(description=desc)
-parser.add_argument('num_pairs',
-                    help='number of keypairs to write',
-                    type=int)
-args = parser.parse_args()
-num_pairs = int(args.num_pairs)
-
-# Generate and write the keypairs to keypairs.py
-print('Writing {} keypairs to keypairs.py...'.format(num_pairs))
-with open('keypairs.py', 'w') as f:
-    f.write('# -*- coding: utf-8 -*-\n')
-    f.write('"""A set of keypairs for use in deploying\n')
-    f.write('BigchainDB servers with a predictable set of keys.\n')
-    f.write('"""\n')
-    f.write('\n')
-    f.write('from __future__ import unicode_literals\n')
-    f.write('\n')
-    f.write('keypairs_list = [')
-    for pair_num in range(num_pairs):
-        keypair = crypto.generate_key_pair()
-        spacer = '' if pair_num == 0 else '    '
-        f.write("{}('{}',\n     '{}'),\n".format(
-            spacer, keypair[0], keypair[1]))
-    f.write('    ]\n')
-
-print('Done.')
diff --git a/docs/server/source/appendices/aws-testing-cluster.md b/docs/server/source/appendices/aws-testing-cluster.md
deleted file mode 100644
index 497d20a2..00000000
--- a/docs/server/source/appendices/aws-testing-cluster.md
+++ /dev/null
@@ -1,207 +0,0 @@
-# Deploy a RethinkDB-Based Testing Cluster on AWS
-
-This section explains a way to deploy a _RethinkDB-based_ cluster of BigchainDB nodes on Amazon Web Services (AWS) for testing purposes.
-
-## Why?
-
-Why would anyone want to deploy a centrally-controlled BigchainDB cluster? Isn't BigchainDB supposed to be decentralized, where each node is controlled by a different person or organization?
-
-Yes! These scripts are for deploying a testing cluster, not a production cluster.
-
-## How?
-
-We use some Bash and Python scripts to launch several instances (virtual servers) on Amazon Elastic Compute Cloud (EC2). Then we use Fabric to install RethinkDB and BigchainDB on all those instances.
-
-## Python Setup
-
-The instructions that follow have been tested on Ubuntu 16.04. Similar instructions should work on similar Linux distros.
-
-**Note: Our Python scripts for deploying to AWS use Python 2 because Fabric doesn't work with Python 3.**
-
-You must install the Python package named `fabric`, but it depends on the `cryptography` package, and that depends on some OS-level packages. On Ubuntu 16.04, you can install those OS-level packages using:
-```text
-sudo apt-get install build-essential libssl-dev libffi-dev python-dev
-```
-
-For other operating systems, see [the installation instructions for the `cryptography` package](https://cryptography.io/en/latest/installation/).
-
-Ideally, create a Python 2 virtual environment and activate it. Then install the following Python packages (in that virtual environment):
-```text
-pip install fabric fabtools requests boto3 awscli
-```
-
-What did you just install?
-
-* "[Fabric](http://www.fabfile.org/) is a Python (2.5-2.7) library and command-line tool for streamlining the use of SSH for application deployment or systems administration tasks."
-* [fabtools](https://github.com/fabtools/fabtools) are "tools for writing awesome Fabric files"
-* [requests](http://docs.python-requests.org/en/master/) is a Python package/library for sending HTTP requests
-* "[Boto](https://boto3.readthedocs.io/en/latest/) is the Amazon Web Services (AWS) SDK for Python, which allows Python developers to write software that makes use of Amazon services like S3 and EC2." (`boto3` is the name of the latest Boto package.)
-* [The aws-cli package](https://pypi.python.org/pypi/awscli), which is an AWS Command Line Interface (CLI).
-
-
-## Setting up in AWS
-
-See the page about [basic AWS Setup](../appendices/aws-setup.html) in the Appendices.
-
-
-## Get Enough Amazon Elastic IP Addresses
-
-The AWS cluster deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).
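-
-You can check how many elastic IPs your account currently has allocated using the EC2 Console, or with a short boto3 sketch like the one below (an illustration, not part of the deployment scripts; it assumes your AWS credentials and default region are already configured):
-```text
-import boto3
-
-# Create a low-level EC2 client (the same pattern the deployment scripts use)
-client = boto3.resource(service_name='ec2').meta.client
-# describe_addresses() returns a dict with an 'Addresses' list
-addresses = client.describe_addresses()['Addresses']
-print('{} elastic IPs allocated'.format(len(addresses)))
-```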
-
-## Create an Amazon EC2 Security Group
-
-Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. You can name it whatever you like. (Notes: The default name in the example AWS deployment configuration file is `bigchaindb`. We had problems with names containing dashes.) The description should be something to help you remember what the security group is for.
-
-For a super lax, somewhat risky, anything-can-enter security group, add these rules for Inbound traffic:
-
-* Type = All TCP, Protocol = TCP, Port Range = 0-65535, Source = 0.0.0.0/0
-* Type = SSH, Protocol = TCP, Port Range = 22, Source = 0.0.0.0/0
-* Type = All UDP, Protocol = UDP, Port Range = 0-65535, Source = 0.0.0.0/0
-* Type = All ICMP, Protocol = ICMP, Port Range = 0-65535, Source = 0.0.0.0/0
-
-(Note: Source = 0.0.0.0/0 is [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for "allow this traffic to come from _any_ IP address.")
-
-If you want to set up a more secure security group, see the [Notes for Firewall Setup](../appendices/firewall-notes.html).
-
-
-## Deploy a BigchainDB Cluster
-
-### Step 1
-
-Suppose _N_ is the number of nodes you want in your BigchainDB cluster. If you already have a set of _N_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory, then you can jump to the next step. To create such a set, you can do something like:
-```text
-# in a Python 3 virtual environment where bigchaindb is installed
-cd bigchaindb
-cd deploy-cluster-aws
-./make_confiles.sh confiles 3
-```
-
-That will create three (3) _default_ BigchainDB configuration files in the `deploy-cluster-aws/confiles` directory (which will be created if it doesn't already exist; if it does exist, it will be removed and recreated). The three files will be named `bcdb_conf0`, `bcdb_conf1`, and `bcdb_conf2`.
-
-You can look inside those files if you're curious. For example, the default keyring is an empty list. Later, the deployment script automatically changes the keyring of each node to be a list of the public keys of all other nodes. Other changes are also made. That is, the configuration files generated in this step are _not_ what will be sent to the deployed nodes; they're just a starting point.
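-
-To illustrate, node 0's keyring might change like this (a sketch with placeholder values; the real entries are the other nodes' public keys):
-```text
-# in bcdb_conf0, as generated by make_confiles.sh:
-"keyring": []
-
-# in the configuration actually sent to node 0 (3-node cluster):
-"keyring": ["<public key of node 1>", "<public key of node 2>"]
-```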
-
-### Step 2
-
-Step 2 is to make an AWS deployment configuration file, if necessary. There's an example AWS configuration file named `example_deploy_conf.py`. It has many comments explaining each setting. The settings in that file are (or should be):
-```text
-NUM_NODES=3
-BRANCH="master"
-SSH_KEY_NAME="not-set-yet"
-USE_KEYPAIRS_FILE=False
-IMAGE_ID="ami-8504fdea"
-INSTANCE_TYPE="t2.medium"
-SECURITY_GROUP="bigchaindb"
-USING_EBS=True
-EBS_VOLUME_SIZE=30
-EBS_OPTIMIZED=False
-ENABLE_WEB_ADMIN=True
-BIND_HTTP_TO_LOCALHOST=True
-```
-
-Make a copy of that file and call it whatever you like (e.g. `cp example_deploy_conf.py my_deploy_conf.py`). You can leave most of the settings at their default values, but you must change the value of `SSH_KEY_NAME` to the name of your private SSH key. You can do that with a text editor. Set `SSH_KEY_NAME` to the name you used for `<key-name>` when you generated an RSA key pair for SSH (in basic AWS setup).
-
-You'll also want to change the `IMAGE_ID` to one that's up-to-date and available in your AWS region. If you don't remember your AWS region, then look in your `$HOME/.aws/config` file. You can find an up-to-date Ubuntu image ID for your region at [https://cloud-images.ubuntu.com/locator/ec2/](https://cloud-images.ubuntu.com/locator/ec2/). An example search string is "eu-central-1 16.04 LTS amd64 hvm:ebs-ssd". You should replace "eu-central-1" with your region name.
-
-If you want your nodes to have a predictable set of pre-generated keypairs, then you should 1) set `USE_KEYPAIRS_FILE=True` in the AWS deployment configuration file, and 2) provide a `keypairs.py` file containing enough keypairs for all of your nodes. You can generate a `keypairs.py` file using the `write_keypairs_file.py` script. For example:
-```text
-# in a Python 3 virtual environment where bigchaindb is installed
-cd bigchaindb
-cd deploy-cluster-aws
-python3 write_keypairs_file.py 100
-```
-
-The above command generates a `keypairs.py` file with 100 keypairs. You can generate more keypairs than you need, so you can use the same list over and over again, for different numbers of servers. The deployment scripts will only use the first `NUM_NODES` keypairs.
-
-### Step 3
-
-Step 3 is to launch the nodes ("instances") on AWS, to install all the necessary software on them, configure the software, run the software, and more. Here's how you'd do that:
-
-```text
-# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
-cd bigchaindb
-cd deploy-cluster-aws
-./awsdeploy.sh my_deploy_conf.py
-# Only if you want to set the replication factor to 3
-fab set_replicas:3
-# Only if you want to start BigchainDB on all the nodes:
-fab start_bigchaindb
-```
-
-`awsdeploy.sh` is a Bash script which calls some Python and Fabric scripts. If you're curious what it does, [the source code](https://github.com/bigchaindb/bigchaindb/blob/master/deploy-cluster-aws/awsdeploy.sh) has many explanatory comments.
-
-It should take a few minutes for the deployment to finish. If you run into problems, see the section on **Known Deployment Issues** below.
-
-The EC2 Console has a section where you can see all the instances you have running on EC2. You can `ssh` into a running instance using a command like:
-```text
-ssh -i pem/bigchaindb.pem ubuntu@ec2-52-29-197-211.eu-central-1.compute.amazonaws.com
-```
-
-except you'd replace the `ec2-52-29-197-211.eu-central-1.compute.amazonaws.com` with the public DNS name of the instance you want to `ssh` into. You can get that from the EC2 Console: just click on an instance and look in its details pane at the bottom of the screen.
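-
-If you'd rather script that lookup, a short boto3 sketch like the one below also works. (It's an illustration, not part of the deployment scripts; it assumes your AWS credentials and region are configured, and you must replace `mytag` with the tag your instances were given.)
-```text
-import boto3
-
-tag = 'mytag'  # assumption: replace with your deployment's tag
-ec2 = boto3.resource(service_name='ec2')
-filters = [{'Name': 'tag:Name', 'Values': [tag]}]
-for instance in ec2.instances.filter(Filters=filters):
-    print(instance.public_dns_name)
-```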
-
-Some commands you might try:
-```text
-ip addr show
-sudo service rethinkdb status
-bigchaindb --help
-bigchaindb show-config
-```
-
-If you enabled the RethinkDB web interface (by setting `ENABLE_WEB_ADMIN=True` in your AWS configuration file), then you can also check that. The way to do that depends on how `BIND_HTTP_TO_LOCALHOST` was set (in your AWS deployment configuration file):
-
-* If it was set to `False`, then just go to your web browser and visit a web address like `http://ec2-52-29-197-211.eu-central-1.compute.amazonaws.com:8080/`. (Replace `ec2-...aws.com` with the hostname of one of your instances.)
-* If it was set to `True` (the default in the example config file), then follow the instructions in the "Via a SOCKS proxy" section of [the "Secure your cluster" page of the RethinkDB documentation](https://www.rethinkdb.com/docs/security/).
-
-
-## Server Monitoring with New Relic
-
-[New Relic](https://newrelic.com/) is a business that provides several monitoring services. One of those services, called Server Monitoring, can be used to monitor things like CPU usage and Network I/O on BigchainDB instances. To do that:
-
-1. Sign up for a New Relic account
-2. Get your New Relic license key
-3. Put that key in an environment variable named `NEWRELIC_KEY`. For example, you might add a line like the following to your `~/.bashrc` file (if you use Bash): `export NEWRELIC_KEY=<your-license-key>`
-4. Once you've deployed a BigchainDB cluster on AWS as above, you can install a New Relic system monitor (agent) on all the instances using:
-
-```text
-# in a Python 2.5-2.7 virtual environment where fabric, boto3, etc. are installed
-fab install_newrelic
-```
-
-Once the New Relic system monitor (agent) is installed on the instances, it will start sending server stats to New Relic on a regular basis. It may take a few minutes for data to show up in your New Relic dashboard (under New Relic Servers).
-
-## Shutting Down a Cluster
-
-There are fees associated with running instances on EC2, so if you're not using them, you should terminate them. You can do that using the AWS EC2 Console.
-
-The same is true of your allocated elastic IP addresses. There's a small fee to keep them allocated if they're not associated with a running instance. You can release them using the AWS EC2 Console, or by using a handy little script named `release_eips.py`. For example:
-```text
-$ python release_eips.py
-You have 2 allocated elastic IPs which are not associated with instances
-0: Releasing 52.58.110.110
-(It has Domain = vpc.)
-1: Releasing 52.58.107.211
-(It has Domain = vpc.)
-```
-
-## Known Deployment Issues
-
-### NetworkError
-
-If you have deployed and terminated several clusters, then EC2 may eventually reuse a public DNS name whose old host key is still in your `~/.ssh/known_hosts`, and you might run into an error message like this:
-```text
-NetworkError: Host key for ec2-xx-xx-xx-xx.eu-central-1.compute.amazonaws.com
-did not match pre-existing key! Server's key was changed recently, or possible
-man-in-the-middle attack.
-```
-
-If so, just clean up your `known_hosts` file and start again. For example, you might copy your current `known_hosts` file to `old_known_hosts` like so:
-```text
-mv ~/.ssh/known_hosts ~/.ssh/old_known_hosts
-```
-
-Then terminate your instances and try deploying again with a different tag.
-
-### Failure of sudo apt-get update
-
-The first thing that's done on all the instances, once they're running, is basically [`sudo apt-get update`](http://askubuntu.com/questions/222348/what-does-sudo-apt-get-update-do). Sometimes that fails. If so, just terminate your instances and try deploying again with a different tag. (These problems seem to be time-bounded, so maybe wait a couple of hours before retrying.)
-
-### Failure when Installing Base Software
-
-If you get an error while installing the base software on the instances, then just terminate your instances and try deploying again with a different tag.
diff --git a/docs/server/source/appendices/index.rst b/docs/server/source/appendices/index.rst
index ae2052f0..a4ff150a 100755
--- a/docs/server/source/appendices/index.rst
+++ b/docs/server/source/appendices/index.rst
@@ -17,7 +17,6 @@ Appendices
    backend
    commands
    aws-setup
-   aws-testing-cluster
    azure-quickstart-template
    generate-key-pair-for-ssh
    firewall-notes
diff --git a/docs/server/source/clusters.md b/docs/server/source/clusters.md
index 52eb74de..331b5b3d 100644
--- a/docs/server/source/clusters.md
+++ b/docs/server/source/clusters.md
@@ -18,9 +18,8 @@ to some extent, on the decentralization of the associated consortium. See the pa
 
 There are some pages and sections that will be of particular interest to anyone building or managing a BigchainDB cluster. In particular:
 
-* [the page about how to set up and run a cluster node](production-nodes/setup-run-node.html),
-* [our production deployment template](production-deployment-template/index.html), and
-* [our old RethinkDB-based AWS deployment template](appendices/aws-testing-cluster.html).
+* [the page about how to set up and run a cluster node](production-nodes/setup-run-node.html) and
+* [our production deployment template](production-deployment-template/index.html).
 
 ## Cluster DNS Records and SSL Certificates
diff --git a/docs/server/source/introduction.md b/docs/server/source/introduction.md
index 1ef47922..59817484 100644
--- a/docs/server/source/introduction.md
+++ b/docs/server/source/introduction.md
@@ -22,7 +22,6 @@ Note that there are a few kinds of nodes:
 There are some old RethinkDB-based deployment instructions as well:
 
 * [Deploy a bare-bones RethinkDB-based node on Azure](appendices/azure-quickstart-template.html)
-* [Deploy a RethinkDB-based testing cluster on AWS](appendices/aws-testing-cluster.html)
 
 Instructions for setting up a client will be provided once there's a public test net.