-
Notifications
You must be signed in to change notification settings - Fork 0
/
restore.yml
280 lines (250 loc) · 10.4 KB
/
restore.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
---
# Any step that should happen before initiating the osp-d playbook.
# This could be validation of the hosts, package installation that is assumed as per the guide, etc.
- name: Prepare our undercloud
  hosts: hypervisor
  gather_facts: true
  any_errors_fatal: true
  vars_files:
    - vars/topology/undercloud.yml
  vars:
    # Basename of the snapshot image; works for both local paths and URLs
    undercloud_image_file: "{{ install.snapshot.image | basename }}"
    # Target path of the VM's primary disk inside the libvirt storage pool
    undercloud_disk_path: "{{ install.disk.pool }}/{{ install.snapshot.filename.rstrip('.qcow2') | basename }}-disk1.qcow2"
    undercloud_user: "{{ install.user.name | default('stack') }}"
    # todo(yfried): add external refs so users can override these values and customize the Undercloud
tasks:
- name: check and create directory
file:
path: "{{ install.disk.pool }}"
state: directory
owner: root
group: root
mode: 0755
when: install.disk.pool | default(False)
- name: Check if shapshot image is path to file or url
stat:
path: "{{ install.snapshot.image }}"
register: image_file
- name: Fail when snapshot image doesn't exist
fail:
msg: "Provided snapshot image is not found, path - {{ install.snapshot.image }}"
when:
- not image_file.stat.exists
- not install.snapshot.image|regex_search('^http')
- name: Stop Undercloud VM if exists
virt:
command: destroy
name: undercloud-0
ignore_errors: true
- name: Remove Undercloud VM if exists
virt:
command: undefine
name: undercloud-0
ignore_errors: true
- name: Copy snapshot to disk path
copy:
src: "{{ install.snapshot.image }}"
dest: "{{ undercloud_disk_path }}"
remote_src: true
when: image_file.stat.exists
- name: download image
get_url:
url: "{{ install.snapshot.image }}"
dest: "{{ undercloud_disk_path }}"
owner: qemu
group: qemu
force: yes
when: install.snapshot.image|regex_search('^http')
    - name: install libguestfs-tool / virt-customize
      package:
        name: libguestfs-tools
        state: present
    # Strip host-specific state from the snapshot (DHCP leases, hostname,
    # persistent MACs, SSH host keys) so the clone boots as a fresh machine
    - name: resets undercloud image configurations
      environment:
        LIBGUESTFS_BACKEND: direct
      command: >
        virt-sysprep -a {{ undercloud_disk_path }}
        --operations dhcp-client-state,dhcp-server-state,net-hostname,net-hwaddr,udev-persistent-net,ssh-hostkeys
    # Copy hypervisor's key into UC root and also for "undercloud_user", as we need this user since the next hosts task updates the inventory
    # This is necessary for muffin because we need to refresh the authorized_keys with the new hypervisor's pubkey
    - name: inject our key into the undercloud image
      command: >
        virt-customize -a {{ undercloud_disk_path }}
        --root-password password:redhat
        --mkdir /root/.ssh
        --chmod 0700:/root/.ssh
        --upload /root/.ssh/id_rsa.pub:/root/.ssh/authorized_keys
        --upload /root/.ssh/id_rsa.pub:/home/{{ undercloud_user }}/.ssh/authorized_keys
        --chmod 0700:/home/{{ undercloud_user }}/.ssh
        --run-command "chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/.ssh/authorized_keys"
        --selinux-relabel
    # --dry-run --print-xml makes virt-install only emit the domain XML on
    # stdout without creating anything; the XML is then defined via libvirt in
    # the following task
    - name: create Undercloud VM from snapshot
      command: >
        virt-install --name {{ undercloud_node.name }}
        --disk path={{ undercloud_disk_path }},device=disk,bus=virtio,format=qcow2,cache={{ undercloud_node.disks.disk1.cache }}
        --network network:data
        --network network:management
        --network network:external
        --virt-type kvm
        --cpu host-model
        --ram {{ undercloud_node.memory }}
        --vcpus {{ undercloud_node.cpu }}
        --os-variant {{ undercloud_node.os.variant }}
        --import
        --noautoconsole
        --autostart
        --vnc
        --rng /dev/urandom
        --dry-run --print-xml
      register: virt_xml
    - name: define the undercloud VM
      virt:
        name: "{{ undercloud_node.name }}"
        command: define
        xml: "{{ virt_xml.stdout }}"
    # In the domain XML the mac element precedes the interface's source
    # element, hence "grep external -B 1" then extracting the quoted address
    - name: get correct MAC of external interface for undercloud (in case we have multiple dhcp leases in libvirt's db)
      shell: |
        virsh dumpxml undercloud-0 | grep external -B 1 | grep mac | cut -d\' -f2
      register: mac_ext
    - name: start the undercloud VM
      virt:
        name: "{{ undercloud_node.name }}"
        state: running
    # Poll libvirt's DHCP lease table (up to 15 tries, 15s apart) until the VM
    # obtains an IPv4 address on the external network
    - name: wait for the undercloud IP to become available
      shell: |
        virsh net-dhcp-leases external | grep {{ mac_ext.stdout }} | awk '($4 == "ipv4") && ($6 =="{{ undercloud_node.name }}") {print $5}'
      register: undercloud_ip
      until: undercloud_ip.stdout != ''
      retries: 15
      delay: 15
    # The lease is CIDR-formatted; ipaddr('address') strips the prefix length
    - name: waiting for the undercloud to be SSH available
      wait_for:
        port: 22
        host: "{{ undercloud_ip.stdout | ipaddr('address') }}"
        search_regex: OpenSSH
        delay: 10
        sleep: 3
    # Register the freshly booted undercloud in the in-memory inventory; SSH is
    # proxied through the hypervisor via ProxyCommand
    - name: add undercloud to host list
      add_host:
        name: "{{ undercloud_node.name }}"
        hostname: "undercloud-0"
        node_label: "undercloud-0"
        groups: "undercloud,tester,openstack_nodes"
        ansible_user: "{{ undercloud_user }}"
        ansible_ssh_private_key_file: "{{ hostvars[groups['overcloud_nodes'][0]].ansible_ssh_private_key_file }}"
        ansible_host: "{{ undercloud_ip.stdout | ipaddr('address') }}"
        # multi-line double-quoted scalar: the trailing backslashes escape the
        # line breaks, producing one long option string
        ansible_ssh_common_args: "
          -o BatchMode=yes \
          -o ForwardAgent=yes \
          -o ServerAliveInterval=30 \
          -o ControlMaster=auto \
          -o ControlPersist=30m \
          -o StrictHostKeyChecking=no \
          -o UserKnownHostsFile=/dev/null \
          -o ProxyCommand=\"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
          -W %h:%p -i {{ ansible_ssh_private_key_file }} \
          {{ ansible_user|default(ansible_ssh_user) }}@{{ ansible_host|default(ansible_ssh_host) }}\""
      notify:
        - update /etc/hosts with undercloud's details
    # Persist the updated inventory to disk on the controller node
    - include_role:
        name: inventory-update
        apply:
          delegate_to: localhost
      vars:
        inventory_file_name: 'hosts-installer'
    # create stack user on hypervisor, because on clean machine we can have it missing
    - block:
      - name: create stack user on hypervisor
        user:
          name: "{{ install.user.name }}"
          state: present
          password: "{{ install.user.password | password_hash('sha512') }}"
      # Grant the user libvirt management rights via a local polkit rule
      - name: set permissions for the user to access the hypervisor
        copy:
          content: |
            [libvirt Management Access]
            Identity=unix-user:{{ install.user.name }}
            Action=org.libvirt.unix.manage
            ResultAny=yes
            ResultInactive=yes
            ResultActive=yes
          dest: "/etc/polkit-1/localauthority/50-local.d/50-libvirt-user-{{ install.user.name }}.pkla"
    # Smoke test: the restored undercloud must answer an OpenStack API call;
    # retried because services may still be starting right after boot
    - name: test openstack environment
      delegate_to: "{{ groups.undercloud | first }}"
      become: true
      become_user: stack
      shell: |
        source ~/stackrc
        openstack server list
      tags: skip_ansible_lint
      register: result
      until: result.rc == 0
      retries: 5
  handlers:
    # Notified by the "add undercloud to host list" task; maps the undercloud's
    # IP to its FQDN and short name on the hypervisor
    - name: update /etc/hosts with undercloud's details
      lineinfile:
        dest: "/etc/hosts"
        line: "{{ hostvars[groups['undercloud'][0]].ansible_host|
          default(hostvars[groups['undercloud'][0]].ansible_ssh_host) }} \
          {{ groups['undercloud'][0] }}.redhat.local \
          {{ groups['undercloud'][0] }}"
        state: present
# workaround for selinux errors while introspection from qs image
- name: fix selinux configurations
  hosts: undercloud
  become: true
  gather_facts: true
  any_errors_fatal: true
  tasks:
    # Restore the expected SELinux type on the PXE boot directory tree
    - name: Change setype of /httpboot/ to httpd_sys
      file:
        path: /httpboot
        setype: httpd_sys_content_t
        recurse: true
# The shade node must be set up again after the restore; shade.yml handles it
- import_playbook: shade.yml
  tags: restore_shade
# update clouds.yaml based on the stackrc file from the restored undercloud
- name: update clouds.yml
  hosts: undercloud
  gather_facts: false
  any_errors_fatal: true
  tags: restore_clouds_yml
  tasks:
    # Re-generate the clouds.yaml entry from the restored credentials file
    - name: update clouds.yaml file
      include_tasks: clouds.yml
      vars:
        auth_file_path: "/home/stack/stackrc"
        cloudname: "undercloud"
# Report which core/director puddle (build) the restored undercloud came from
- name: print core/director puddle version
  hosts: undercloud
  gather_facts: false
  any_errors_fatal: false
  tags: restore_puddle_version
  tasks:
    - name: check whether a saved core puddle version file exists
      stat:
        path: ~/core_puddle_version
      register: puddle_file
    - name: get core puddle version from ~/core_puddle_version
      command: cat ~/core_puddle_version
      register: core_puddle_file
      when: puddle_file.stat.exists
    # Fallback: derive the puddle from the rhos-release repo baseurl
    - name: get core puddle version from repos
      shell: cat /etc/yum.repos.d/rhos-release-[0-9]*.repo | grep ^baseurl.*/OpenStack/ | grep -v latest | awk -F / '{print $8 }' | tail -n 1
      register: core_puddle_repos
      when: not puddle_file.stat.exists
    # Exactly one of the two registers above belongs to a skipped task and has
    # no stdout attribute; default('') keeps ternary's eager argument
    # evaluation from blowing up on it
    - debug:
        msg: "Build mark: core_puddle={{ puddle_file.stat.exists | ternary(core_puddle_file.stdout | default(''), core_puddle_repos.stdout | default('')) }}"
    - name: look for a director-specific rhos-release repo file
      find:
        use_regex: true
        patterns: '^rhos-release-\d+-director.repo$'
        paths:
          - '/etc/yum.repos.d/'
      register: director_repo_result
    - name: get director puddle version
      shell: "cat {{ director_repo_result.files[0].path }} | grep ^baseurl.*/OpenStack/ | grep -v latest | awk -F / '{print $8 }' | tail -n 1"
      register: director_puddle
      when: director_repo_result.matched > 0
    - debug:
        msg: "Build mark: director_puddle={{ director_puddle.stdout }}"
      when: director_repo_result.matched > 0