-
Notifications
You must be signed in to change notification settings - Fork 3
/
main.tf
332 lines (285 loc) · 7.7 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
# AWS provider configuration: every resource below is created in the
# configured region using credentials from the named shared profile.
provider "aws" {
  region  = "${var.aws_region}"
  profile = "${var.aws_profile}"
}
# Shared SSH connection settings for provisioners.
locals {
  # Raw contents of the operator's SSH private key file.
  private_key = "${file(var.ssh_private_key_filename)}"
  # Fall back to the local ssh-agent when no key file was supplied
  # (signalled by the sentinel path /dev/null). The equality test
  # already yields a bool, so the former `? true : false` ternary
  # was redundant.
  agent = "${var.ssh_private_key_filename == "/dev/null"}"
}
# Runs a local script to return the current user in bash.
# The script's JSON result is expected to contain an "owner" key;
# it is consumed below when naming the VPC and the cluster.
data "external" "whoami" {
program = ["scripts/local/whoami.sh"]
}
# Create a VPC to launch our instances into.
resource "aws_vpc" "default" {
  cidr_block = "10.0.0.0/16"
  # Bare boolean instead of the quoted string "true".
  enable_dns_hostnames = true
  tags {
    # Tag with the cluster owner: an explicit var.owner wins,
    # otherwise the whoami.sh result is used.
    Name = "${coalesce(var.owner, data.external.whoami.result["owner"])}"
  }
}
# Addressable Cluster UUID.
# Short stable identifier derived from the VPC id: "tf" plus the first
# four hex chars of md5(vpc id). `$$` escapes the template variable so
# Terraform substitutes it from `vars` rather than the outer scope.
data "template_file" "cluster_uuid" {
template = "tf$${uuid}"
vars {
uuid = "${substr(md5(aws_vpc.default.id),0,4)}"
}
}
# Allow overrides of the owner variable or default to whoami.sh.
# Renders "<username>-tf<uuid>" where username is truncated to 10
# characters and uuid is the same 4-char md5 prefix used above.
data "template_file" "cluster-name" {
template = "$${username}-tf$${uuid}"
vars {
uuid = "${substr(md5(aws_vpc.default.id),0,4)}"
username = "${format("%.10s", coalesce(var.owner, data.external.whoami.result["owner"]))}"
}
}
# Create DCOS Bucket regardless of what exhibitor backend was chosen.
resource "aws_s3_bucket" "dcos_bucket" {
  bucket = "${data.template_file.cluster-name.rendered}-bucket"
  acl    = "private"
  # Bare boolean instead of the quoted string "true"; lets
  # `terraform destroy` delete the bucket even when it is non-empty.
  force_destroy = true
  tags {
    Name    = "${data.template_file.cluster-name.rendered}-bucket"
    cluster = "${data.template_file.cluster-name.rendered}"
  }
}
# Create an internet gateway to give our subnet access to the outside
# world (paired with the default route below).
resource "aws_internet_gateway" "default" {
vpc_id = "${aws_vpc.default.id}"
}
# Grant the VPC internet access on its main route table by sending
# all non-local traffic (0.0.0.0/0) through the internet gateway.
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.default.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.default.id}"
}
# Create a subnet to launch public nodes into (10.0.0.0 - 10.0.3.255);
# instances get a public IP at launch.
resource "aws_subnet" "public" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.0.0/22"
map_public_ip_on_launch = true
}
# Create a subnet to launch slave private node into (10.0.4.0 - 10.0.7.255).
# NOTE(review): map_public_ip_on_launch = true on a "private" subnet —
# presumably so Terraform provisioners can reach the nodes over SSH;
# confirm this is intended before tightening.
resource "aws_subnet" "private" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.4.0/22"
map_public_ip_on_launch = true
}
# Cluster-wide security group: unrestricted traffic in both directions,
# but only for peers inside the 10.0.0.0/8 range (intra-VPC traffic).
resource "aws_security_group" "any_access_internal" {
  name        = "cluster-security-group"
  description = "Manage all ports cluster level"
  vpc_id      = "${aws_vpc.default.id}"

  # Any protocol, any port, internal peers only.
  ingress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["10.0.0.0/8"]
  }

  # Mirror of the ingress rule for outbound traffic.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["10.0.0.0/8"]
  }
}
# Security group for the ELB: plain HTTP in from the whole internet,
# anything out.
resource "aws_security_group" "http" {
  name        = "http-security-group"
  description = "A security group for the elb"
  vpc_id      = "${aws_vpc.default.id}"

  # Port 80 open to the world.
  ingress {
    protocol    = "tcp"
    from_port   = 80
    to_port     = 80
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Unrestricted outbound internet access.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# A security group for SSH only access.
resource "aws_security_group" "ssh" {
name = "ssh-security-group"
description = "SSH only access for terraform and administrators"
vpc_id = "${aws_vpc.default.id}"
# SSH access restricted to the admin CIDR (the rule uses
# var.admin_cidr, not "anywhere" as a previous comment claimed).
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${var.admin_cidr}"]
}
# NOTE(review): no egress block, so Terraform removes the default
# allow-all egress rule from this group — confirm that is intended.
}
# A security group for Admins to control access.
resource "aws_security_group" "http-https" {
name = "http-https-security-group"
description = "Administrators can manage their machines"
vpc_id = "${aws_vpc.default.id}"
# HTTP access, restricted to the admin CIDR (not "anywhere" as a
# previous comment claimed).
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["${var.admin_cidr}"]
}
# HTTPS access, likewise restricted to the admin CIDR.
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["${var.admin_cidr}"]
}
}
# Outbound-only internet access. Without this group an agent cannot
# reach the internet to pull containers or artifacts; it opens no
# inbound ports at all.
resource "aws_security_group" "internet-outbound" {
  name        = "internet-outbound-only-access"
  description = "Security group to control outbound internet access only."
  vpc_id      = "${aws_vpc.default.id}"

  # Any protocol, any port, any destination — egress only.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# Security group for DC/OS masters: Mesos/Marathon/Exhibitor/Zookeeper
# ports are open to the internal 10.0.0.0/8 range, while Adminrouter
# (80/443) is restricted to the admin CIDR.
resource "aws_security_group" "master" {
name = "master-security-group"
description = "Security group for masters"
vpc_id = "${aws_vpc.default.id}"
# Mesos Master access from within the vpc
ingress {
to_port = 5050
from_port = 5050
protocol = "tcp"
cidr_blocks = ["10.0.0.0/8"]
}
# Adminrouter HTTP access, restricted to the admin CIDR (the rule
# uses var.admin_cidr, not the VPC range as previously commented).
ingress {
to_port = 80
from_port = 80
protocol = "tcp"
cidr_blocks = ["${var.admin_cidr}"]
}
# Adminrouter HTTPS access, likewise restricted to the admin CIDR
# (not "anywhere" as previously commented).
ingress {
to_port = 443
from_port = 443
protocol = "tcp"
cidr_blocks = ["${var.admin_cidr}"]
}
# Marathon access from within the vpc
ingress {
to_port = 8080
from_port = 8080
protocol = "tcp"
cidr_blocks = ["10.0.0.0/8"]
}
# Exhibitor access from within the vpc
ingress {
to_port = 8181
from_port = 8181
protocol = "tcp"
cidr_blocks = ["10.0.0.0/8"]
}
# Zookeeper Access from within the vpc
ingress {
to_port = 2181
from_port = 2181
protocol = "tcp"
cidr_blocks = ["10.0.0.0/8"]
}
}
# A security group for public slave so it is accessible via the web.
# The three TCP and three UDP ranges (0-21, 23-5050, 5052-32000) are
# open to the world; presumably the gaps deliberately exclude SSH (22)
# and 5051 from public exposure — confirm against the DC/OS port docs.
resource "aws_security_group" "public_slave" {
name = "public-slave-security-group"
description = "security group for slave public"
vpc_id = "${aws_vpc.default.id}"
# TCP 0-21 (everything below SSH) from anywhere
ingress {
to_port = 21
from_port = 0
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# TCP 23-5050 (skips SSH on 22) from anywhere
ingress {
to_port = 5050
from_port = 23
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# TCP 5052-32000 (skips 5051) from anywhere
ingress {
to_port = 32000
from_port = 5052
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# UDP 0-21 from anywhere
ingress {
to_port = 21
from_port = 0
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
# UDP 23-5050 from anywhere
ingress {
to_port = 5050
from_port = 23
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
# UDP 5052-32000 from anywhere
ingress {
to_port = 32000
from_port = 5052
protocol = "udp"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
KubernetesCluster = "${var.kubernetes_cluster}"
}
}
# Security group for private slaves: unrestricted traffic in both
# directions, but only for peers in the internal 10.0.0.0/8 range.
resource "aws_security_group" "private_slave" {
  name        = "private-slave-security-group"
  description = "security group for slave private"
  vpc_id      = "${aws_vpc.default.id}"

  # Any protocol, any port, internal peers only.
  ingress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["10.0.0.0/8"]
  }

  # Mirror of the ingress rule for outbound traffic.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["10.0.0.0/8"]
  }

  tags {
    KubernetesCluster = "${var.kubernetes_cluster}"
  }
}
# Provide tested AMI and user from listed region startup commands.
# Looks up the AMI / default user / bootstrap commands for the chosen
# OS in the chosen region from a local module.
module "aws-tested-oses" {
source = "./modules/dcos-tested-aws-oses"
os = "${var.os}"
region = "${var.aws_region}"
}