# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
# The commands are executed on every node provided
#
# - target=local
# The commands are executed from the node hosting phd. When not
# using phd, they should be run from some other independent host
# (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
# The commands are executed on the Nth node provided.
# For example, to run only on the first node, use target=$PHD_ENV_nodes1
# (an illustrative block is sketched below).
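#
# For illustration only (not part of this scenario), such a block would
# look like:
#
#   target=$PHD_ENV_nodes1
#   ....
#   hostname
#   ....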
#
# Tasks to be performed at this step include:
# - installing mariadb-galera-server, rsync and the xinetd-based clustercheck service
# - configuring galera and the clustercheck credentials on every node
# - creating the 'galera' pacemaker resource and the OpenStack service databases
#################################
# Scenario Requirements Section #
#################################
= VARIABLES =
PHD_VAR_env_password
PHD_VAR_network_domain
PHD_VAR_deployment
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
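# xinetd will serve the clustercheck health check used by haproxy, and rsync
# is the SST method configured in galera.cnf below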
yum install -y mariadb-galera-server xinetd rsync
if [ "$PHD_VAR_deployment" = collapsed ]; then
    # Allowing the proxy to perform health checks on galera while we're
    # initializing it is... problematic.
    pcs resource disable lb-haproxy
fi
cat > /etc/sysconfig/clustercheck << EOF
MYSQL_USERNAME="clustercheck"
MYSQL_PASSWORD="${PHD_VAR_env_password}"
MYSQL_HOST="localhost"
MYSQL_PORT="3306"
EOF
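# /etc/sysconfig/clustercheck supplies the credentials the clustercheck
# script (exposed over HTTP via xinetd below) uses when probing mysql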
# Workaround for some old buggy mariadb packages that created log files as
# root:root; newer packages then fail to start because of them.
chown mysql:mysql /var/log/mariadb -R
systemctl start mysqld
# required for clustercheck to work
mysql -e "CREATE USER 'clustercheck'@'localhost' IDENTIFIED BY '${PHD_VAR_env_password}';"
systemctl stop mysqld
# Configure galera cluster
# NOTE: wsrep SSL encryption is strongly recommended and should be enabled
# on all production deployments. This how-to does NOT show how to configure
# SSL (an illustrative sketch follows the config block below). The shell
# expansion in bind_address resolves to the internal IP address of the node.
cat > /etc/my.cnf.d/galera.cnf << EOF
[mysqld]
skip-name-resolve=1
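# Galera requires row-based replication and the InnoDB storage engine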
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
query_cache_size=0
query_cache_type=0
bind_address=$(ip addr show dev eth1 scope global | grep dynamic| sed -e 's#.*inet ##g' -e 's#/.*##g')
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
EOF
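# TLS for galera replication is not configured here. A minimal sketch, assuming
# certificates have already been distributed to every node under the
# hypothetical /etc/pki/galera/ paths, would add a line such as the following
# to the [mysqld] section above:
#
#   wsrep_provider_options="socket.ssl_key=/etc/pki/galera/server-key.pem;socket.ssl_cert=/etc/pki/galera/server-cert.pem;socket.ssl_ca=/etc/pki/galera/ca.pem"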
cat > /etc/xinetd.d/galera-monitor << EOF
service galera-monitor
{
        port = 9200
        disable = no
        socket_type = stream
        protocol = tcp
        wait = no
        user = root
        group = root
        groups = yes
        server = /usr/bin/clustercheck
        type = UNLISTED
        per_source = UNLIMITED
        log_on_success =
        log_on_failure = HOST
        flags = REUSE
}
EOF
systemctl enable xinetd
systemctl start xinetd
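# Illustrative check (not executed here): once xinetd is listening, the HTTP
# health check haproxy relies on can be exercised with e.g.
#   curl -v http://localhost:9200/
# which should return HTTP 200 once this node is part of a synced cluster.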
....
target=$PHD_ENV_nodes1
....
# node_list must be of the form node1,node2,node3
#
# Node names must be in the form that the cluster knows them as
# (i.e. no domains) and there can't be a trailing comma (hence the
# extra weird sed expression); a worked example follows.
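# For example (hypothetical hostnames, assuming PHD_VAR_network_domain=example.com):
#   "east-01.vmnet.example.com east-02.vmnet.example.com east-03.vmnet.example.com"
#   becomes "east-01,east-02,east-03"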
node_list=$(echo $PHD_ENV_nodes | sed -e s/.vmnet.${PHD_VAR_network_domain}\ /,/g -e s/.vmnet.${PHD_VAR_network_domain}//)
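# Create galera as a multi-state (master/slave) pacemaker resource:
# master-max=3 allows up to three nodes to be promoted, and promotion is what
# bootstraps or joins the cluster, hence the generous 300s promote timeout
# and on-fail=block.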
pcs resource create galera galera enable_creation=true wsrep_cluster_address="gcomm://${node_list}" additional_parameters='--open-files-limit=16384' meta master-max=3 ordered=true op promote timeout=300s on-fail=block --master
if [ "$PHD_VAR_deployment" = collapsed ]; then
    # Now we can re-enable the proxy
    pcs resource enable lb-haproxy
fi
# wait for galera to start and become promoted (up to 60 * 5s = 300s)
loop=0; while ! clustercheck > /dev/null 2>&1 && [ "$loop" -lt 60 ]; do
    echo "waiting for galera to be promoted"
    loop=$((loop + 1))
    sleep 5
done
# Dropping the default per-node accounts can fail depending on which node
# bootstrapped the cluster, so ignore errors here.
for node in $PHD_ENV_nodes; do
    mysql -e "DROP USER ''@'${node}';" || true
    mysql -e "DROP USER 'root'@'${node}';" || true
done
galera_script=galera.setup
echo "" > $galera_script
echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED by 'mysqltest' WITH GRANT OPTION;" >> $galera_script
for db in keystone glance cinder neutron nova heat; do
    cat <<EOF >> $galera_script
CREATE DATABASE ${db};
GRANT ALL ON ${db}.* TO '${db}'@'%' IDENTIFIED BY '${db}test';
EOF
done
echo "FLUSH PRIVILEGES;" >> $galera_script
#echo "quit" >> $galera_script
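# The generated galera.setup contains one stanza per service, e.g. for keystone:
#   CREATE DATABASE keystone;
#   GRANT ALL ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystonetest';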
if [ "$loop" -ge 60 ]; then
echo Timeout waiting for galera
else
mysql mysql < $galera_script
mysqladmin flush-hosts
fi
....