|
1 | | -import paramiko |
| 1 | +import json |
2 | 2 | import argparse |
| 3 | +import paramiko |
3 | 4 | import traceback |
| 5 | +import zk |
| 6 | +import hdfs |
| 7 | +import tera |
4 | 8 |
|
5 | | -ZK = 'zk' |
6 | | -HDFS = 'hdfs' |
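| | +# Small paramiko wrapper so the component launchers share one SSH client.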
| 9 | +class SSH(): |
| 10 | + def __init__(self): |
| 11 | + self.s = paramiko.SSHClient() |
| 12 | + self.s.load_system_host_keys() |
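| | + # AutoAddPolicy accepts unknown host keys automatically: convenient for
| | + # cluster bring-up, but it skips host-key verification.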
| 13 | + self.s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) |
7 | 14 |
|
8 | | -def parse_input(): |
9 | | - parser = argparse.ArgumentParser() |
10 | | - parser.add_argument('file', type=str, help='A file describes the zk cluster') |
11 | | - args = parser.parse_args() |
12 | | - return args |
13 | | - |
14 | | -def read_ip_list(args): |
15 | | - try: |
16 | | - fp = open(args.file, 'r') |
17 | | - except: |
18 | | - traceback.print_exc() |
19 | | - |
20 | | - zk_ip_dicts = [] |
21 | | - hdfs_ip_dicts = {'master':[], 'slave':[]} |
22 | | - start_port = 2888 |
23 | | - end_port = 3888 |
24 | | - client_port = 2181 |
25 | | - myid = 1 |
26 | | - while True: |
| 15 | + def run_cmd(self, ip, cmd):
| | + out = err = None
27 | 16 | try: |
28 | | - comp = fp.readline().split(' ') |
29 | | - if comp[0].startswith(ZK): |
30 | | - zk_ip_dicts.append([comp[1], {'start_port': str(start_port), 'end_port': str(end_port), 'client_port': str(client_port), 'myid': str(myid), 'path': comp[2][:-1]}]) |
31 | | - start_port += 1 |
32 | | - end_port += 1 |
33 | | - client_port += 1 |
34 | | - myid += 1 |
35 | | - elif comp[0].startswith(HDFS): |
36 | | - if comp[3].startswith('master'): |
37 | | - hdfs_ip_dicts['master'].append([comp[1], {'path': comp[2]}]) |
38 | | - else: |
39 | | - hdfs_ip_dicts['slave'].append([comp[1], {'path': comp[2]}]) |
40 | | - else: |
41 | | - break |
| 17 | + self.s.connect(ip)
| 18 | + stdin, stdout, stderr = self.s.exec_command(cmd)
| | + # Drain both streams before closing: closing the client tears down
| | + # the channel and can drop buffered output.
| | + out = stdout.read()
| | + err = stderr.read()
| 19 | + self.s.close()
42 | 20 | except: |
43 | | - break |
44 | | - if hdfs_ip_dicts['slave'] != [] and hdfs_ip_dicts == []: |
45 | | - print 'must have a master!' |
46 | | - return |
47 | | - return zk_ip_dicts, hdfs_ip_dicts |
| 21 | + traceback.print_exc()
| 22 | + return out, err
| 23 | + |
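| | +# Typical invocation (script name and image ID here are hypothetical):
| | +# python deploy.py cluster.json --docker d8 --zk --hdfs --tera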
| 24 | +def parse_input(): |
| 25 | + parser = argparse.ArgumentParser() |
| 26 | + parser.add_argument('file', type=str, help='A JSON file describing the cluster')
| 27 | + parser.add_argument('--docker', type=str, required=True, help='ID of the docker image') |
| 28 | + parser.add_argument('--zk', action='store_true', help='Launch zk') |
| 29 | + parser.add_argument('--hdfs', action='store_true', help='Launch hdfs') |
| 30 | + parser.add_argument('--tera', action='store_true', help='Launch tera') |
| 31 | + args = parser.parse_args() |
| 32 | + return args |
| 33 | + |
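| | +# Sketch of the expected JSON layout, inferred from the reads below; the
| | +# shape of the per-component sections is an assumption:
| | +# {"ip": "host1:host2:host3", "log_prefix": "/opt/log",
| | +#  "zk": {...}, "hdfs": {...}, "tera": {...}}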
| 34 | +def config(args): |
| 35 | + conf = json.load(open(args.file, 'r'))
| 36 | + ip_list = conf['ip'].split(':')
| 37 | + log_prefix = conf['log_prefix']
| 38 | + zk_cluster = zk.ZkCluster(ip_list, conf['zk'], log_prefix)
| 39 | + zk_cluster.populate_zk_cluster() |
| 40 | + for z in zk_cluster.cluster: |
| 41 | + print z.to_string() |
| 42 | + |
| 43 | + hdfs_cluster = hdfs.HdfsCluster(ip_list, conf['hdfs'], log_prefix)
| 44 | + ret = hdfs_cluster.populate_hdfs_cluster() |
| 45 | + if ret is False:
| | + print 'failed to populate hdfs cluster'
| 46 | + exit(1)
| 47 | + for h in hdfs_cluster.cluster: |
| 48 | + print h.to_string() |
48 | 49 |
|
49 | | -def start_zks(ip_dicts): |
50 | | - if ip_dicts == []: |
| 50 | + tera_cluster = tera.TeraCluster(ip_list, conf['tera'], log_prefix)
| 51 | + tera_cluster.populate_tera_cluster() |
| 52 | + for t in tera_cluster.cluster: |
| 53 | + print t.to_string() |
| 54 | + |
| 55 | + return zk_cluster, hdfs_cluster, tera_cluster |
| 56 | + |
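| | +# The --zk/--hdfs/--tera flags act as opt-in filters: when any is set, only
| | +# the selected components launch; when none is set, all three launch.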
| 57 | +def start_zk(args, zk_cluster, s): |
| 58 | + if (args.hdfs or args.tera) and not args.zk: |
51 | 59 | return |
52 | | - s=paramiko.SSHClient() |
53 | | - s.load_system_host_keys() |
54 | | - s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) |
55 | | - ips = [] |
56 | | - for item in ip_dicts: |
57 | | - ips.append(item[0]) |
58 | | - ips = ' '.join(ips) |
59 | | - for details in ip_dicts: |
60 | | - ip = details[0] |
61 | | - details = details[1] |
62 | | - try: |
63 | | - s.connect(ip) |
64 | | - cmd = 'docker run -t -d -v {dir}:/opt/share -p {cport}:{cport} -p {sport}:{sport} -p {eport}:{eport} --net=host d8 /usr/bin/python /opt/zk_setup.py --servers {ip} --port {cport} --myid {myid}'.\ |
65 | | - format(dir=details['path'], cport=details['client_port'], sport=details['start_port'], eport=details['end_port'], ip=ips, myid=details['myid']) |
66 | | - stdin, stdout, stderr = s.exec_command(cmd) |
67 | | - print cmd |
68 | | - print stdout.read() |
69 | | - print '\n', stderr.read() |
70 | | - s.close() |
71 | | - except: |
72 | | - traceback.print_exc() |
73 | | - |
74 | | -def start_hdfs(ip_dicts): |
75 | | - if ip_dicts == {}: |
| 60 | + for zk_instance in zk_cluster.cluster: |
| 61 | + #print zk_instance.to_string() |
| 62 | + cmd = zk_instance.to_cmd(' '.join(zk_cluster.ip_zk), args.docker) |
| 63 | + print cmd |
| 64 | + s.run_cmd(zk_instance.ip, cmd) |
| 65 | + |
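| | +# Every hdfs node gets the same docker image plus the full master/slave
| | +# lists; presumably each instance's to_cmd derives its own role from them.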
| 66 | +def start_hdfs(args, hdfs_cluster, s): |
| 67 | + if (args.zk or args.tera) and not args.hdfs: |
76 | 68 | return |
77 | | - s = paramiko.SSHClient() |
78 | | - s.load_system_host_keys() |
79 | | - s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) |
80 | | - ips = [] |
81 | | - master = ip_dicts['master'][0] |
82 | | - master_ip = master[0] |
83 | | - master_details = master[1] |
84 | | - slave_list = ip_dicts['slave'] |
85 | | - for item in slave_list: |
86 | | - ips.append(item[0]) |
87 | | - ips = ' '.join(ips) |
88 | | - |
89 | | - cmd = 'docker run -t -d -v {dir}:/opt/share -p 9000:9000 -p 9001:9001 --net=host 67 /usr/bin/python /opt/hdfs_setup.py --masters {master} --slaves {slaves} --mode master'.\ |
90 | | - format(dir=master_details['path'], master=master_ip, slaves=ips) |
91 | | - print cmd |
92 | | - s.connect(master_ip) |
93 | | - #stdin, stdout, stderr = s.exec_command(cmd) |
94 | | - #print stdout.read() |
95 | | - #print '\n', stderr.read() |
96 | | - s.close() |
| 69 | + for hdfs_instance in hdfs_cluster.cluster: |
| 70 | + #print hdfs_instance.to_string() |
| 71 | + cmd = hdfs_instance.to_cmd(args.docker, hdfs_cluster.master_ip, ' '.join(hdfs_cluster.slave_ip)) |
| 72 | + print cmd |
| 73 | + s.run_cmd(hdfs_instance.ip, cmd) |
97 | 74 |
|
98 | | - for slave in slave_list: |
99 | | - slave_ip = slave[0] |
100 | | - slave_details = slave[1] |
101 | | - cmd = 'docker run -t -d -v {dir}:/opt/share -p 9000:9000 -p 9001:9001 --net=host 67 /usr/bin/python /opt/hdfs_setup.py --masters {master} --slaves {slaves} --mode slave'.\ |
102 | | - format(dir=slave_details['path'], master=master_ip, slaves=ips) |
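| | +# Tera finds its zk quorum via a comma-separated address list, built here
| | +# from the zk cluster's tera-facing IPs.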
| 75 | +def start_tera(args, tera_cluster, zk_cluster, s): |
| 76 | + if (args.zk or args.hdfs) and not args.tera: |
| 77 | + return |
| 78 | + for tera_instance in tera_cluster.cluster: |
| 79 | + #print tera_instance.to_string() |
| 80 | + cmd = tera_instance.to_cmd(args.docker, ','.join(zk_cluster.ip_tera)) |
103 | 81 | print cmd |
104 | | - s.connect(slave_ip) |
105 | | - #stdin, stdout, stderr = s.exec_command(cmd) |
106 | | - #print stdout.read() |
107 | | - #print '\n', stderr.read() |
108 | | - s.close() |
| 82 | + s.run_cmd(tera_instance.ip, cmd) |
109 | 83 |
|
110 | 84 | def main(): |
111 | 85 | args = parse_input() |
112 | | - zk_ip_dicts, hdfs_ip_dicts = read_ip_list(args) |
113 | | - start_zks(zk_ip_dicts) |
114 | | - start_hdfs(hdfs_ip_dicts) |
| 86 | + zk_cluster, hdfs_cluster, tera_cluster = config(args) |
| 87 | + s = SSH() |
| 88 | + start_zk(args, zk_cluster, s) |
| 89 | + start_hdfs(args, hdfs_cluster, s) |
| 90 | + start_tera(args, tera_cluster, zk_cluster, s) |
115 | 91 |
|
116 | 92 | if __name__ == '__main__': |
117 | 93 | main() |