diff --git a/.gitignore b/.gitignore
index bbdf57d..fba7dba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,14 @@
 ssh/
 *.pem
 *.tfstate.backup
-*.tfstate
\ No newline at end of file
+*.tfstate
+
+# Don't version control the actual SSH keys
+id_rsa
+id_rsa.pub
+
+# Terraform plugins
+.terraform
+
+# PHPStorm configs
+.idea
diff --git a/README.md b/README.md
index 981059d..b24b6ac 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
----
-maintainer: alexsedova
----
+**Maintainers**
+* alexsedova
+* [Dzhuneyt](https://github.com/Dzhuneyt)
 
 # Terraform + AWS + Docker Swarm setup
 
diff --git a/ami.tf b/ami.tf
new file mode 100644
index 0000000..ed9cd58
--- /dev/null
+++ b/ami.tf
@@ -0,0 +1,22 @@
+# Look up the current Ubuntu AMI
+# In a production environment you probably want to
+# hardcode the AMI ID, to prevent upgrading to a
+# new and potentially broken release.
+data "aws_ami" "ubuntu" {
+  most_recent = true
+  owners = [
+    "099720109477"]
+  # Canonical
+
+  filter {
+    name = "name"
+    values = [
+      "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"]
+  }
+
+  filter {
+    name = "virtualization-type"
+    values = [
+      "hvm"]
+  }
+}
diff --git a/app-instances.tf b/app-instances.tf
index 91b85da..baba1cc 100644
--- a/app-instances.tf
+++ b/app-instances.tf
@@ -1,67 +1,92 @@
 /* Setup our aws provider */
 provider "aws" {
-  access_key = "${var.access_key}"
-  secret_key = "${var.secret_key}"
-  region = "${var.region}"
+  region = var.region
+  # Read the rest from env variables
 }
-resource "aws_instance" "master" {
-  ami = "ami-26c43149"
-  instance_type = "t2.micro"
-  security_groups = ["${aws_security_group.swarm.name}"]
-  key_name = "${aws_key_pair.deployer.key_name}"
+
+# Create the Swarm master node
+#TODO Allow more than one master
+resource "aws_instance" "swarm_master" {
+  ami = data.aws_ami.ubuntu.id
+  instance_type = var.instance_type
+  vpc_security_group_ids = [
+    aws_security_group.allow_http_traffic.id,
+    aws_security_group.ssh_from_other_ec2_instances.id,
+  ]
+  associate_public_ip_address = true
+  key_name = aws_key_pair.deployer.key_name
+  subnet_id = aws_subnet.public_subnets[0].id
 
   connection {
+    host = coalesce(self.public_ip, self.private_ip)
+    type = "ssh"
     user = "ubuntu"
-    key_file = "ssh/key"
+    private_key = "${file("${path.module}/id_rsa")}"
   }
   provisioner "remote-exec" {
     inline = [
-      "sudo apt-get update",
-      "sudo apt-get install apt-transport-https ca-certificates",
-      "sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D",
-      "sudo sh -c 'echo \"deb https://apt.dockerproject.org/repo ubuntu-trusty main\" > /etc/apt/sources.list.d/docker.list'",
-      "sudo apt-get update",
-      "sudo apt-get install -y docker-engine=1.12.0-0~trusty",
+      "sudo apt-get -q update",
+      "sudo apt-get install -q -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common",
+      "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -",
+      "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"",
+      "sudo apt-get -q update",
+      "sudo apt-get install -y docker-ce docker-ce-cli containerd.io",
       "sudo docker swarm init",
-      "sudo docker swarm join-token --quiet worker > /home/ubuntu/token"
+      "sudo docker swarm join-token --quiet worker > /home/ubuntu/token",
     ]
   }
+
+  # Mount the project root inside the master node
   provisioner "file" {
     source = "proj"
     destination = "/home/ubuntu/"
   }
-  tags = {
-    Name = "swarm-master"
+
+  tags = {
"${local.stack_name}-manager-1" } } -resource "aws_instance" "slave" { - count = 2 - ami = "ami-26c43149" - instance_type = "t2.micro" - security_groups = ["${aws_security_group.swarm.name}"] - key_name = "${aws_key_pair.deployer.key_name}" +resource "aws_instance" "swarm_worker" { + count = 2 + ami = data.aws_ami.ubuntu.id + instance_type = var.instace_type + vpc_security_group_ids = [ + aws_security_group.allow_http_traffic.id] + key_name = aws_key_pair.deployer.key_name + subnet_id = aws_subnet.public_subnets[0].id + associate_public_ip_address = true connection { + host = coalesce(self.public_ip, self.private_ip) + type = "ssh" user = "ubuntu" - key_file = "ssh/key" + private_key = "${file("${path.module}/id_rsa")}" } provisioner "file" { - source = "key.pem" - destination = "/home/ubuntu/key.pem" + source = "id_rsa" + destination = "/home/ubuntu/manager_connection_key.pem" } provisioner "remote-exec" { inline = [ "sudo apt-get update", - "sudo apt-get install apt-transport-https ca-certificates", - "sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D", - "sudo sh -c 'echo \"deb https://apt.dockerproject.org/repo ubuntu-trusty main\" > /etc/apt/sources.list.d/docker.list'", + "sudo apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common", + "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -", + "sudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"", "sudo apt-get update", - "sudo apt-get install -y docker-engine=1.12.0-0~trusty", - "sudo chmod 400 /home/ubuntu/test.pem", - "sudo scp -o StrictHostKeyChecking=no -o NoHostAuthenticationForLocalhost=yes -o UserKnownHostsFile=/dev/null -i test.pem ubuntu@${aws_instance.master.private_ip}:/home/ubuntu/token .", - "sudo docker swarm join --token $(cat /home/ubuntu/token) ${aws_instance.master.private_ip}:2377" + "sudo apt-get install -y docker-ce docker-ce-cli containerd.io", + "sudo chmod 400 /home/ubuntu/manager_connection_key.pem", + + # Copy the Swarm join token from the manager node to the current worker node + "sudo scp -o StrictHostKeyChecking=no -o NoHostAuthenticationForLocalhost=yes -o UserKnownHostsFile=/dev/null -i manager_connection_key.pem ubuntu@${aws_instance.swarm_master.private_ip}:/home/ubuntu/token .", + + # Join the swarm as a worker + "sudo docker swarm join --token $(cat /home/ubuntu/token) ${aws_instance.swarm_master.private_ip}:2377", + + # Remove the SSH key to access the manager node, from the disk of the worker for extra security + "sudo rm /home/ubuntu/manager_connection_key.pem" ] } - tags = { - Name = "swarm-${count.index}" + tags = { + Name = "${local.stack_name}-worker-${count.index}" } -} \ No newline at end of file +} + diff --git a/internet-gateway.tf b/internet-gateway.tf new file mode 100644 index 0000000..d79e769 --- /dev/null +++ b/internet-gateway.tf @@ -0,0 +1,30 @@ +# Allowing the public subnets to be accessible from the internet, +# requires those subnets to be associated with a route table +# and that route table needs to be associated with an internet gateway +resource "aws_internet_gateway" "internet_gateway" { + vpc_id = aws_vpc.main.id + + tags = { + Name = "${local.stack_name}_igw" + } +} +resource "aws_route_table" "route_table" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.internet_gateway.id + } + + tags = { + Name = "${local.stack_name}" + 
} +} + +# Associate the subnets with the Route Table + Internet Gateway +resource "aws_route_table_association" "vpc-route-table-association" { + route_table_id = aws_route_table.route_table.id + count = length(local.public_subnets) + + subnet_id = aws_subnet.public_subnets.*.id[count.index] +} diff --git a/key-pairs.tf b/key-pairs.tf index 477310d..ac007c5 100644 --- a/key-pairs.tf +++ b/key-pairs.tf @@ -1,4 +1,5 @@ resource "aws_key_pair" "deployer" { - key_name = "deploy" - public_key = "${file(\"path-to-ssh-public-key\")}" -} \ No newline at end of file + key_name = "deploy" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEO+jiNpr5KOpGR/toIecazlxwJTaZLxhK7YNosQgu7MSJrcu3+W9tvuJK92wDXMZjbhUaJWVOazA/yMHOfblQ3b0RHw5szWU8qlLNbfIxaK9MIZqkTEGaESifmVuxThXOf4wMQytaXqRGjVUMpi1U6l++6PmjGlgLT5ieoXapO8ZRj/6YsDNjzSAtzsmcDDbdh4NpIzlGEGitwKZwUZeayOz+c3EV8IixxdQwVUP0JS8G8ax3IpTnc+qZP0CU6jVgHH5nnSjPMQeIAZU4oEJdcMMAkHVJr+zJWSDL2Zv2pzJ7+vRNGoUlACiPSLb6u2sqPlbtaYj+/2kmo8aA9Tl7 dzhuneyt@dzhuneyt-G5-5587" +} + diff --git a/outputs.tf b/outputs.tf index 9353007..bed97bc 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,3 +1,4 @@ -output "master.ip" { - value = "${aws_instance.master.public_ip}" -} \ No newline at end of file +output "master_ip" { + value = aws_instance.swarm_master.public_ip +} + diff --git a/security-group.tf b/security-group.tf index d2ccfbc..13b0f79 100644 --- a/security-group.tf +++ b/security-group.tf @@ -1,38 +1,56 @@ -/* Default security group */ -resource "aws_security_group" "swarm" { - name = "swarm-group" - description = "Default security group that allows inbound and outbound traffic from all instances in the VPC" +resource "aws_security_group" "ssh_from_other_ec2_instances" { + vpc_id = "${aws_vpc.main.id}" +} - ingress { - from_port = "0" - to_port = "0" - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - self = true - } +# Retrieve current environment IP +data "http" "myip" { + url = "http://ipv4.icanhazip.com" +} +resource "aws_security_group_rule" "ssh_from_other_ec2_instances" { + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + # Opening to 0.0.0.0/0 can lead to security vulnerabilities. 
+  security_group_id = aws_security_group.ssh_from_other_ec2_instances.id
+  source_security_group_id = aws_security_group.ssh_from_other_ec2_instances.id
+}
+resource "aws_security_group_rule" "ssh_from_my_computer" {
+  type = "ingress"
+  from_port = 22
+  to_port = 22
+  protocol = "tcp"
+  cidr_blocks = [
+    "${chomp(data.http.myip.body)}/32"]
+  security_group_id = aws_security_group.ssh_from_other_ec2_instances.id
+}
+
+/* Security group for public HTTP traffic */
+resource "aws_security_group" "allow_http_traffic" {
+  name = "${local.stack_name}-http-in"
+  description = "Allow all HTTP traffic in and out on port 80"
+  vpc_id = "${aws_vpc.main.id}"
 
   ingress {
-    from_port = 22
-    to_port = 22
-    protocol = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
+    from_port = 80
+    to_port = 80
+    protocol = "-1"
+    cidr_blocks = [
+      "0.0.0.0/0"]
+    self = true
   }
 
   egress {
-    from_port = "0"
-    to_port = "0"
-    protocol = "-1"
-    cidr_blocks = ["0.0.0.0/0"]
-    self = true
+    from_port = "0"
+    to_port = "0"
+    protocol = "-1"
+    cidr_blocks = [
+      "0.0.0.0/0"]
+    self = true
   }
-  egress {
-    from_port = 22
-    to_port = 22
-    protocol = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
-  }
-
-  tags {
-    Name = "swarm-example"
+
+  tags = {
+    Name = "${local.stack_name}-http"
   }
-}
\ No newline at end of file
+}
+
diff --git a/variables.tf b/variables.tf
index 7a24ea2..365ad13 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,9 +1,26 @@
-variable "access_key" {
-  default = "YOUR_ACCESS_KEY"
+variable "region" {
+  default = "us-east-1"
 }
-variable "secret_key" {
-  default = "YOUR_SECRET_KEY"
+variable "instance_type" {
+  default = "t2.micro"
+  description = "The AWS EC2 instance type. Defaults to t2.micro if empty"
+}
+variable "vpc_cidr_range" {
+  description = "The CIDR block (range) for the VPC that will be created. Defaults to 10.0.0.0/16"
+  default = "10.0.0.0/16"
+}
+
+locals {
+  stack_name = "Docker Swarm"
+  cidr_range = var.vpc_cidr_range
+  public_subnets = [
+    "10.0.1.0/24",
+    "10.0.2.0/24",
+    "10.0.3.0/24",
+  ]
+  private_subnets = [
+    "10.0.10.0/24",
+    "10.0.11.0/24",
+    "10.0.13.0/24",
+  ]
 }
-variable "region" {
-  default = "your-region"
-}
\ No newline at end of file
diff --git a/versions.tf b/versions.tf
new file mode 100644
index 0000000..d9b6f79
--- /dev/null
+++ b/versions.tf
@@ -0,0 +1,3 @@
+terraform {
+  required_version = ">= 0.12"
+}
diff --git a/vpc.tf b/vpc.tf
new file mode 100644
index 0000000..a64f552
--- /dev/null
+++ b/vpc.tf
@@ -0,0 +1,57 @@
+# Create a VPC
+resource "aws_vpc" "main" {
+  cidr_block = var.vpc_cidr_range
+  enable_dns_hostnames = true
+  tags = {
+    Name = "Docker Swarm"
+  }
+}
+output "vpc_id" {
+  # Output the newly created VPC ID
+  value = aws_vpc.main.id
+}
+
+# Look up the availability zones in the current region
+data "aws_availability_zones" "available" {
+  state = "available"
+}
+# For high availability we need to create multiple subnets
+resource "aws_subnet" "public_subnets" {
+  vpc_id = aws_vpc.main.id
+  count = length(local.public_subnets)
+  cidr_block = local.public_subnets[count.index]
+  availability_zone = data.aws_availability_zones.available.names[count.index]
+
+  tags = {
+    Name = "${local.stack_name}-public-subnet-${count.index}"
+  }
+}
+resource "aws_subnet" "private_subnets" {
+  vpc_id = aws_vpc.main.id
+  count = length(local.private_subnets)
+  cidr_block = local.private_subnets[count.index]
+  availability_zone = data.aws_availability_zones.available.names[count.index]
+
+  tags = {
+    Name = "${local.stack_name}-private-subnet-${count.index}"
+  }
+}
+
+# Reference the newly created VPC as a data source
+data "aws_vpc" "default" {
+  id = aws_vpc.main.id
+}
+
+# Read all subnet ids for this vpc/region.
+data "aws_subnet_ids" "all_subnets" {
+  vpc_id = aws_vpc.main.id
+  #data.aws_vpc.default.id
+
+  # Wait for the subnets to be actually created, not just the VPC
+  depends_on = [
+    aws_subnet.public_subnets
+  ]
+}
+output "subnet_ids" {
+  value = data.aws_subnet_ids.all_subnets.ids
+}