mirror of https://github.com/bigchaindb/bigchaindb.git
synced 2024-10-13 13:34:05 +00:00

Commit 00bfec8754: fixed merge commit in /prod-node-setup-mgmt/index.rst
.gitignore (vendored): 7 changed lines
@@ -65,12 +65,11 @@ target/
# pyenv
.python-version

# Private key files from AWS
*.pem

# Some files created when deploying a cluster on AWS
deploy-cluster-aws/conf/rethinkdb.conf
deploy-cluster-aws/hostlist.py
deploy-cluster-aws/confiles/
deploy-cluster-aws/client_confile
deploy-cluster-aws/hostlist.py
deploy-cluster-aws/ssh_key.py
benchmarking-tests/hostlist.py
benchmarking-tests/ssh_key.py
benchmarking-tests/fabfile.py (vendored): 3 changed lines
@@ -7,6 +7,7 @@ from fabric.operations import run, put
 from fabric.context_managers import settings

 from hostlist import public_dns_names
+from ssh_key import ssh_key_path

 # Ignore known_hosts
 # http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
@@ -18,7 +19,7 @@ env.hosts = public_dns_names

 # SSH key files to try when connecting:
 # http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
-env.key_filename = 'pem/bigchaindb.pem'
+env.key_filename = ssh_key_path


 @task
@@ -351,7 +351,8 @@ class Bigchain(object):
     def is_valid_transaction(self, transaction):
         """Check whether a transaction is valid or invalid.

-        Similar to `validate_transaction` but does not raise an exception if the transaction is valid.
+        Similar to `validate_transaction` but never raises an exception.
+        It returns `False` if the transaction is invalid.

         Args:
             transaction (dict): transaction to check.
@@ -31,6 +31,7 @@ coverage:
     - "bigchaindb/version.py"
     - "benchmarking-tests/*"
     - "speed-tests/*"
+    - "ntools/*"

 comment:
   # @stevepeak (from codecov.io) suggested we change 'suggestions' to 'uncovered'
@@ -26,9 +26,19 @@ fi
 # to set environment variables related to AWS deployment
 echo "Reading "$DEPLOY_CONF_FILE
 source $DEPLOY_CONF_FILE

+# Check if SSH_KEY_NAME got set
+if [ "$SSH_KEY_NAME" == "not-set-yet" ] || \
+    [ "$SSH_KEY_NAME" == "" ] || \
+    [ -z ${SSH_KEY_NAME+x} ]; then
+    echo "SSH_KEY_NAME was not set in that file"
+    exit 1
+fi
+
 echo "NUM_NODES = "$NUM_NODES
 echo "BRANCH = "$BRANCH
 echo "WHAT_TO_DEPLOY = "$WHAT_TO_DEPLOY
+echo "SSH_KEY_NAME = "$SSH_KEY_NAME
 echo "USE_KEYPAIRS_FILE = "$USE_KEYPAIRS_FILE
 echo "IMAGE_ID = "$IMAGE_ID
 echo "INSTANCE_TYPE = "$INSTANCE_TYPE
@@ -38,9 +48,9 @@ if [ "$USING_EBS" = True ]; then
     echo "EBS_OPTIMIZED = "$EBS_OPTIMIZED
 fi

-# Check for AWS private key file (.pem file)
-if [ ! -f "pem/bigchaindb.pem" ]; then
-    echo "File pem/bigchaindb.pem (AWS private key) is missing"
+# Check for the SSH private key file
+if [ ! -f "$HOME/.ssh/$SSH_KEY_NAME" ]; then
+    echo "The SSH private key file "$HOME"/.ssh/"$SSH_KEY_NAME" is missing"
     exit 1
 fi
@@ -70,9 +80,9 @@ fi
 TAG="BDB-"$WHAT_TO_DEPLOY"-"`date +%m-%d@%H:%M`
 echo "TAG = "$TAG

-# Change the file permissions on pem/bigchaindb.pem
+# Change the file permissions on the SSH private key file
 # so that the owner can read it, but that's all
-chmod 0400 pem/bigchaindb.pem
+chmod 0400 $HOME/.ssh/$SSH_KEY_NAME

 # The following Python script does these things:
 # 0. allocates more elastic IP addresses if necessary,
@@ -84,6 +94,8 @@ chmod 0400 pem/bigchaindb.pem
 # 5. writes the shellscript add2known_hosts.sh
 # 6. (over)writes a file named hostlist.py
 #    containing a list of all public DNS names.
+# 7. (over)writes a file named ssh_key.py
+#    containing the location of the private SSH key file.
 python launch_ec2_nodes.py --deploy-conf-file $DEPLOY_CONF_FILE --tag $TAG

 # Make add2known_hosts.sh executable then execute it.
@@ -91,6 +103,10 @@ python launch_ec2_nodes.py --deploy-conf-file $DEPLOY_CONF_FILE --tag $TAG
 chmod +x add2known_hosts.sh
 ./add2known_hosts.sh

+# Test an SSH connection to one of the hosts
+# and prompt the user for their SSH password if necessary
+fab set_host:0 test_ssh
+
 # Rollout base packages (dependencies) needed before
 # storage backend (RethinkDB) and BigchainDB can be rolled out
 fab install_base_software
@@ -27,6 +27,11 @@ BRANCH="master"
 # What do you want to deploy?
 WHAT_TO_DEPLOY="servers"

+# SSH_KEY_NAME is the name of the SSH private key file
+# in $HOME/.ssh/
+# It is used for SSH communications with AWS instances.
+SSH_KEY_NAME="not-set-yet"
+
 # USE_KEYPAIRS_FILE is either True or False
 # Should node keypairs be read from keypairs.py?
 # (If False, then the keypairs will be whatever is in the
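Aside: in your copy of this file, that one setting is the line you must change. A minimal sketch, assuming you picked the sample key name `bcdb-troy-1` suggested in the AWS setup docs:
```text
SSH_KEY_NAME="bcdb-troy-1"
```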
deploy-cluster-aws/fabfile.py (vendored): 8 changed lines
@@ -15,6 +15,7 @@ from fabric.operations import run, put
 from fabric.context_managers import settings

 from hostlist import public_dns_names
+from ssh_key import ssh_key_path

 # Ignore known_hosts
 # http://docs.fabfile.org/en/1.10/usage/env.html#disable-known-hosts
@@ -26,7 +27,7 @@ env.hosts = public_dns_names

 # SSH key files to try when connecting:
 # http://docs.fabfile.org/en/1.10/usage/env.html#key-filename
-env.key_filename = 'pem/bigchaindb.pem'
+env.key_filename = ssh_key_path


 ######################################################################
@@ -48,6 +49,11 @@ def set_host(host_index):
     env.hosts = [public_dns_names[int(host_index)]]


+@task
+def test_ssh():
+    run('echo "If you see this, then SSH to a remote host worked."')
+
+
 # Install base software
 @task
 @parallel
@@ -9,9 +9,12 @@
 5. writes the shellscript add2known_hosts.sh
 6. (over)writes a file named hostlist.py
    containing a list of all public DNS names.
+7. (over)writes a file named ssh_key.py
+   containing the location of the private SSH key file.
 """

 from __future__ import unicode_literals
+from os.path import expanduser
 import sys
 import time
 import socket
@@ -23,9 +26,9 @@ import boto3
 from awscommon import get_naeips


-SETTINGS = ['NUM_NODES', 'BRANCH', 'WHAT_TO_DEPLOY', 'USE_KEYPAIRS_FILE',
-            'IMAGE_ID', 'INSTANCE_TYPE', 'USING_EBS', 'EBS_VOLUME_SIZE',
-            'EBS_OPTIMIZED']
+SETTINGS = ['NUM_NODES', 'BRANCH', 'WHAT_TO_DEPLOY', 'SSH_KEY_NAME',
+            'USE_KEYPAIRS_FILE', 'IMAGE_ID', 'INSTANCE_TYPE', 'USING_EBS',
+            'EBS_VOLUME_SIZE', 'EBS_OPTIMIZED']


 class SettingsTypeError(TypeError):
@@ -76,6 +79,9 @@ if not isinstance(BRANCH, str):
 if not isinstance(WHAT_TO_DEPLOY, str):
     raise SettingsTypeError('WHAT_TO_DEPLOY should be a string')

+if not isinstance(SSH_KEY_NAME, str):
+    raise SettingsTypeError('SSH_KEY_NAME should be a string')
+
 if not isinstance(USE_KEYPAIRS_FILE, bool):
     msg = 'USE_KEYPAIRS_FILE should be a boolean (True or False)'
     raise SettingsTypeError(msg)
@@ -105,6 +111,11 @@ if WHAT_TO_DEPLOY not in ['servers', 'clients']:
                      'The AWS deployment configuration file sets it to {}'.
                      format(WHAT_TO_DEPLOY))

+if SSH_KEY_NAME in ['not-set-yet', '', None]:
+    raise ValueError('SSH_KEY_NAME should be set. '
+                     'The AWS deployment configuration file sets it to {}'.
+                     format(SSH_KEY_NAME))
+
 # Since we assume 'gp2' volumes (for now), the possible range is 1 to 16384
 if EBS_VOLUME_SIZE > 16384:
     raise ValueError('EBS_VOLUME_SIZE should be <= 16384. '
@@ -193,7 +204,7 @@ for _ in range(NUM_NODES):
             ImageId=IMAGE_ID,
             MinCount=1,
             MaxCount=1,
-            KeyName='bigchaindb',
+            KeyName=SSH_KEY_NAME,
             InstanceType=INSTANCE_TYPE,
             SecurityGroupIds=['bigchaindb'],
             BlockDeviceMappings=[dm],
@@ -204,7 +215,7 @@ for _ in range(NUM_NODES):
             ImageId=IMAGE_ID,
             MinCount=1,
             MaxCount=1,
-            KeyName='bigchaindb',
+            KeyName=SSH_KEY_NAME,
             InstanceType=INSTANCE_TYPE,
             SecurityGroupIds=['bigchaindb']
         )
@@ -281,6 +292,20 @@ with open('hostlist.py', 'w') as f:
     f.write('\n')
     f.write('public_dns_names = {}\n'.format(public_dns_names))

+# Create a file named ssh_key.py
+# containing the location of the private SSH key file.
+# If a ssh_key.py already exists, it will be overwritten.
+print('Writing ssh_key.py')
+with open('ssh_key.py', 'w') as f:
+    f.write('# -*- coding: utf-8 -*-\n')
+    f.write('"""This file exists as a convenient way for Fabric to get\n')
+    f.write('the location of the private SSH key file.')
+    f.write('"""\n')
+    f.write('\n')
+    f.write('from __future__ import unicode_literals\n')
+    f.write('\n')
+    home = expanduser('~')
+    f.write('ssh_key_path = "{}/.ssh/{}"\n'.format(home, SSH_KEY_NAME))

 # For each node in the cluster, check port 22 (ssh) until it's reachable
 for instance in instances_with_tag:
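For reference, the ssh_key.py this block writes would come out roughly like the following (a sketch, assuming a hypothetical home directory `/home/alice` and key name `bcdb-troy-1`):
```text
# -*- coding: utf-8 -*-
"""This file exists as a convenient way for Fabric to get
the location of the private SSH key file."""

from __future__ import unicode_literals

ssh_key_path = "/home/alice/.ssh/bcdb-troy-1"
```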
@@ -36,3 +36,37 @@ Default output format [None]: [Press Enter]
```

This writes two files: `~/.aws/credentials` and `~/.aws/config`. AWS tools and packages look for those files.
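For reference, `~/.aws/credentials` is a short INI-style text file; a sketch of its contents, with placeholders standing in for the two secrets you entered:
```text
[default]
aws_access_key_id = <your-access-key-id>
aws_secret_access_key = <your-secret-access-key>
```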

## Generate an RSA Key Pair for SSH

Eventually, you'll have one or more instances (virtual machines) running on AWS and you'll want to SSH to them. To do that, you need a public/private key pair. The public key will be sent to AWS, and you can tell AWS to put it in any instances you provision there. You'll keep the private key on your local workstation.

First you need to make up a key name. Some ideas:

* `bcdb-troy-1`
* `bigchaindb-7`
* `bcdb-jupiter`

If you already have key pairs on AWS (Amazon EC2), you have to pick a name that's not already being used.
Below, replace every instance of `<key-name>` with your actual key name.
To generate a public/private RSA key pair with that name:
```text
ssh-keygen -t rsa -C "<key-name>" -f ~/.ssh/<key-name>
```

It will ask you for a passphrase. You can use whatever passphrase you like, but don't lose it. Two keys (files) will be created in `~/.ssh/`:

1. `~/.ssh/<key-name>.pub` is the public key
2. `~/.ssh/<key-name>` is the private key

To send the public key to AWS, use the AWS Command-Line Interface:
```text
aws ec2 import-key-pair \
--key-name "<key-name>" \
--public-key-material file://~/.ssh/<key-name>.pub
```

If you're curious why there's a `file://` in front of the path to the public key, see issue [aws/aws-cli#41 on GitHub](https://github.com/aws/aws-cli/issues/41).

If you want to verify that your key pair was imported by AWS, go to the Amazon EC2 console at [https://console.aws.amazon.com/ec2/](https://console.aws.amazon.com/ec2/), select the region you gave above when you did `aws configure` (e.g. eu-central-1), click on **Key Pairs** in the left sidebar, and check that `<key-name>` is listed.
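Alternatively, a sketch of checking from the command line with the AWS CLI's `describe-key-pairs` subcommand (the key name is a placeholder):
```text
aws ec2 describe-key-pairs --key-names "<key-name>"
```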
@@ -41,16 +41,6 @@ See the page about [basic AWS Setup](../appendices/aws-setup.html) in the Appendices

 The AWS cluster deployment scripts use elastic IP addresses (although that may change in the future). By default, AWS accounts get five elastic IP addresses. If you want to deploy a cluster with more than five nodes, then you will need more than five elastic IP addresses; you may have to apply for those; see [the AWS documentation on elastic IP addresses](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html).

-## Create an Amazon EC2 Key Pair
-
-Go to the AWS EC2 Console and select "Key Pairs" in the left sidebar. Click the "Create Key Pair" button. Give it the name `bigchaindb`. You should be prompted to save a file named `bigchaindb.pem`. That file contains the RSA private key. (You can get the public key from the private key, so there's no need to send it separately.)
-
-Save the file in `bigchaindb/deploy-cluster-aws/pem/bigchaindb.pem`.
-
-**You should not share your private key.**
-
 ## Create an Amazon EC2 Security Group

 Go to the AWS EC2 Console and select "Security Groups" in the left sidebar. Click the "Create Security Group" button. Name it `bigchaindb`. The description probably doesn't matter; you can also put `bigchaindb` for that.
@@ -132,6 +122,7 @@ Step 2 is to make an AWS deployment configuration file, if necessary. There's an
 NUM_NODES=3
 BRANCH="master"
 WHAT_TO_DEPLOY="servers"
+SSH_KEY_NAME="not-set-yet"
 USE_KEYPAIRS_FILE=False
 IMAGE_ID="ami-accff2b1"
 INSTANCE_TYPE="m3.2xlarge"
@@ -140,7 +131,7 @@ EBS_VOLUME_SIZE=30
 EBS_OPTIMIZED=False
 ```

-If you're happy with those settings, then you can skip to the next step. Otherwise, you could make a copy of `example_deploy_conf.py` (e.g. `cp example_deploy_conf.py my_deploy_conf.py`) and then edit the copy using a text editor.
+Make a copy of that file and call it whatever you like (e.g. `cp example_deploy_conf.py my_deploy_conf.py`). You can leave most of the settings at their default values, but you must change the value of `SSH_KEY_NAME` to the name of your private SSH key. You can do that with a text editor. Set `SSH_KEY_NAME` to the name you used for `<key-name>` when you generated an RSA key pair for SSH (in basic AWS setup).

 If you want your nodes to have a predictable set of pre-generated keypairs, then you should 1) set `USE_KEYPAIRS_FILE=True` in the AWS deployment configuration file, and 2) provide a `keypairs.py` file containing enough keypairs for all of your nodes. You can generate a `keypairs.py` file using the `write_keypairs_file.py` script. For example:
 ```text
@@ -14,4 +14,3 @@ In a production environment, a BigchainDB node can have several other components
* A RethinkDB proxy server
* Scalable storage for RethinkDB (e.g. using RAID)
* Monitoring software, to monitor all the machines in the node
* Maybe a configuration management (CM) server and CM agents on all machines
@@ -5,4 +5,6 @@ Production Node Setup & Management
    :maxdepth: 1

    overview
+   install-terraform
+   prov-one-m-aws
    set-up-ansible
docs/source/prod-node-setup-mgmt/install-terraform.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# Install Terraform

The [Terraform documentation has installation instructions](https://www.terraform.io/intro/getting-started/install.html) for all common operating systems.

Note: Hashicorp (the company behind Terraform) will try to convince you that running Terraform on their servers (inside Atlas) would be great. **While that might be true for many, it is not true for BigchainDB.** BigchainDB federations are supposed to be decentralized, and if everyone used Atlas, that would be a point of centralization. If you don't want to run Terraform on your local machine, you could install it on a cloud machine under your control (e.g. on AWS).

## Ubuntu Installation Tips

If you want to install Terraform on Ubuntu, first [download the .zip file](https://www.terraform.io/downloads.html). Then install it in `/opt`:
```text
sudo mkdir -p /opt/terraform
sudo unzip path/to/zip-file.zip -d /opt/terraform
```

Why install it in `/opt`? See [the answers at Ask Ubuntu](https://askubuntu.com/questions/1148/what-is-the-best-place-to-install-user-apps).

Next, add `/opt/terraform` to your path. If you use bash for your shell, then you could add this line to `~/.bashrc`:
```text
export PATH="/opt/terraform:$PATH"
```

After doing that, relaunch your shell or force it to read `~/.bashrc` again, e.g. by doing `source ~/.bashrc`. You can verify that terraform is installed and in your path by doing:
```text
terraform --version
```

It should say the current version of Terraform.
@@ -7,6 +7,7 @@ Deploying and managing a production BigchainDB node is much more involved than w
 * Production nodes need monitoring
 * Production nodes need maintenance, e.g. software upgrades, scaling

-Thankfully, there are tools to help!
+Thankfully, there are tools to help! We use:

-This section explains how to use various tools to deploy and manage a production node.
+* [Terraform](https://www.terraform.io/) to provision infrastructure such as AWS instances, storage and security groups
+* [Ansible](https://www.ansible.com/) to manage the software installed on that infrastructure (configuration management)
docs/source/prod-node-setup-mgmt/prov-one-m-aws.md (new file, 50 lines)
@@ -0,0 +1,50 @@
# Provision a One-Machine Node on AWS

This page describes how to provision the resources needed for a one-machine BigchainDB node on AWS using Terraform.

## Get Set

First, do the [basic AWS setup steps outlined in the Appendices](../appendices/aws-setup.html).

Then go to the `.../bigchaindb/ntools/one-m/aws/` directory and open the file `variables.tf`. Most of the variables have sensible default values, but you can change them if you like. In particular, you may want to change `aws_region`. (Terraform looks in `~/.aws/credentials` to get your AWS credentials, so you don't have to enter those anywhere.)

The `ssh_key_name` has no default value, so Terraform will prompt you every time it needs it.

To see what Terraform will do, run:
```text
terraform plan
```

It should ask you the value of `ssh_key_name`.

It figures out the plan by reading all the `.tf` Terraform files in the directory.

If you don't want to be asked for the `ssh_key_name`, you can change the default value of `ssh_key_name` or [you can set an environment variable](https://www.terraform.io/docs/configuration/variables.html) named `TF_VAR_ssh_key_name`.
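For example, a sketch assuming the sample key name `bcdb-troy-1`:
```text
export TF_VAR_ssh_key_name="bcdb-troy-1"
terraform plan
```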

## Provision

To provision all the resources specified in the plan, do the following. **Note: This will provision actual resources on AWS, and those cost money. Be sure to shut down the resources you don't want to keep running later, otherwise the cost will keep growing.**
```text
terraform apply
```

Terraform will report its progress as it provisions all the resources. Once it's done, you can go to the Amazon EC2 web console and see the instance, its security group, its elastic IP, and its attached storage volumes (one for the root directory and one for RethinkDB storage).

At this point, there is no software installed on the instance except for Ubuntu 14.04 and whatever else came with the Amazon Machine Image (AMI) specified in the configuration. The next step is to use Ansible to install and configure all the necessary software.

## (Optional) "Destroy"

If you want to shut down all the resources just provisioned, you must first disable termination protection on the instance:

1. Go to the EC2 console and select the instance you just launched. It should be named `BigchainDB_node`.
2. Click **Actions** > **Instance Settings** > **Change Termination Protection** > **Yes, Disable**
3. Back in your terminal, do `terraform destroy`

Terraform should "destroy" (i.e. terminate or delete) all the AWS resources you provisioned above.

## See Also

* The [Terraform Documentation](https://www.terraform.io/docs/)
* The [Terraform Documentation for the AWS "Provider"](https://www.terraform.io/docs/providers/aws/index.html)
ntools/README.md (new file, 1 line)

@@ -0,0 +1 @@
This directory contains tools for provisioning, deploying and managing a BigchainDB node (on AWS, Azure or wherever).
ntools/one-m/aws/amis.tf (new file, 20 lines)

@@ -0,0 +1,20 @@
# Each AWS region has a different AMI name
# even though the contents are the same.
# This file has the mapping from region --> AMI name.
#
# These are all Ubuntu 14.04 LTS AMIs
# with Arch = amd64, Instance Type = hvm:ebs-ssd
# from https://cloud-images.ubuntu.com/locator/ec2/
variable "amis" {
    type = "map"
    default = {
        eu-west-1 = "ami-55452e26"
        eu-central-1 = "ami-b1cf39de"
        us-east-1 = "ami-8e0b9499"
        us-west-2 = "ami-547b3834"
        ap-northeast-1 = "ami-49d31328"
        ap-southeast-1 = "ami-5e429c3d"
        ap-southeast-2 = "ami-25f3c746"
        sa-east-1 = "ami-97980efb"
    }
}
ntools/one-m/aws/outputs.tf (new file, 6 lines)

@@ -0,0 +1,6 @@
# You can get the value of "ip_address" after running terraform apply using:
# $ terraform output ip_address
# You could use that in a script, for example
output "ip_address" {
    value = "${aws_eip.ip.public_ip}"
}
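A sketch of such a script, assuming the default `ubuntu` user on the Ubuntu 14.04 AMIs listed in `amis.tf`, with `<key-name>` as a placeholder:
```text
EIP=$(terraform output ip_address)
ssh -i ~/.ssh/<key-name> ubuntu@$EIP
```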
ntools/one-m/aws/providers.tf (new file, 6 lines)

@@ -0,0 +1,6 @@
provider "aws" {
    # An AWS access_key and secret_key are needed; Terraform looks
    # for an AWS credentials file in the default location.
    # See https://tinyurl.com/pu8gd9h
    region = "${var.aws_region}"
}
ntools/one-m/aws/resources.tf (new file, 47 lines)

@@ -0,0 +1,47 @@
# One instance (virtual machine) on AWS:
# https://www.terraform.io/docs/providers/aws/r/instance.html
resource "aws_instance" "instance" {
    ami = "${lookup(var.amis, var.aws_region)}"
    instance_type = "${var.aws_instance_type}"
    tags {
        Name = "BigchainDB_node"
    }
    ebs_optimized = true
    key_name = "${var.ssh_key_name}"
    vpc_security_group_ids = ["${aws_security_group.node_sg1.id}"]
    root_block_device = {
        volume_type = "gp2"
        volume_size = "${var.root_storage_in_GiB}"
        delete_on_termination = true
    }
    # Enable EC2 Instance Termination Protection
    disable_api_termination = true
}

# This EBS volume will be used for database storage (not for root).
# https://www.terraform.io/docs/providers/aws/r/ebs_volume.html
resource "aws_ebs_volume" "db_storage" {
    type = "gp2"
    availability_zone = "${aws_instance.instance.availability_zone}"
    # Size in GiB (not GB!)
    size = "${var.DB_storage_in_GiB}"
    tags {
        Name = "BigchainDB_db_storage"
    }
}

# This allocates a new elastic IP address, if necessary
# and then associates it with the above aws_instance
resource "aws_eip" "ip" {
    instance = "${aws_instance.instance.id}"
    vpc = true
}

# This attaches the instance to the EBS volume for RethinkDB storage
# https://www.terraform.io/docs/providers/aws/r/volume_attachment.html
resource "aws_volume_attachment" "ebs_att" {
    # Why /dev/sdp? See https://tinyurl.com/z2zqm6n
    device_name = "/dev/sdp"
    volume_id = "${aws_ebs_volume.db_storage.id}"
    instance_id = "${aws_instance.instance.id}"
}
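A note on the device name (an assumption about Xen-based instance types, not something stated in this commit): a volume attached as `/dev/sdp` typically shows up inside Ubuntu as `/dev/xvdp`. On the instance, you can check where it actually landed with:
```text
lsblk
```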
ntools/one-m/aws/security_group.tf (new file, 89 lines)

@@ -0,0 +1,89 @@
resource "aws_security_group" "node_sg1" {
    name_prefix = "BigchainDB_"
    description = "Single-machine BigchainDB node security group"
    tags = {
        Name = "BigchainDB_one-m"
    }

    # Allow *all* outbound traffic
    egress {
        from_port = 0
        to_port = 0
        protocol = "-1"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # SSH
    ingress {
        from_port = 22
        to_port = 22
        protocol = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # DNS
    ingress {
        from_port = 53
        to_port = 53
        protocol = "udp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # HTTP used by some package managers
    ingress {
        from_port = 80
        to_port = 80
        protocol = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # NTP daemons use port 123 but the request will
    # come from inside the firewall so a response is expected

    # SNMP (e.g. for server monitoring)
    ingress {
        from_port = 161
        to_port = 161
        protocol = "udp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # HTTPS used when installing RethinkDB
    # and by some package managers
    ingress {
        from_port = 443
        to_port = 443
        protocol = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # StatsD
    ingress {
        from_port = 8125
        to_port = 8125
        protocol = "udp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # Don't allow port 8080 for the RethinkDB web interface.
    # Use a SOCKS proxy or reverse proxy instead.

    # BigchainDB Client-Server REST API
    ingress {
        from_port = 9984
        to_port = 9984
        protocol = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    # Port 28015 doesn't have to be open to the outside
    # since the RethinkDB client and server are on localhost

    # RethinkDB intracluster communications use port 29015
    ingress {
        from_port = 29015
        to_port = 29015
        protocol = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
    }
}
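Once BigchainDB is installed and running (a later, Ansible-driven step), a quick sketch of checking that the 9984 ingress rule is reachable from outside, reusing the elastic IP captured earlier:
```text
curl http://$EIP:9984/
```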
ntools/one-m/aws/variables.tf (new file, 19 lines)

@@ -0,0 +1,19 @@
variable "aws_region" {
    default = "eu-central-1"
}

variable "aws_instance_type" {
    default = "m4.xlarge"
}

variable "root_storage_in_GiB" {
    default = 10
}

variable "DB_storage_in_GiB" {
    default = 30
}

variable "ssh_key_name" {
    # No default. Ask as needed.
}
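As an alternative to the `TF_VAR_ssh_key_name` environment variable mentioned in the docs above, Terraform's `-var` flag can supply the value per invocation (the key name here is a placeholder):
```text
terraform plan -var "ssh_key_name=<key-name>"
```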