# This file can be used directly by 'phd', see 'build-all.sh' in this
# directory for how it can be invoked. The only requirement is a list
# of nodes you'd like it to modify.
#
# The scope of each command-block is controlled by the preceding
# 'target' line.
#
# - target=all
# The commands are executed on every node provided
#
# - target=local
# The commands are executed from the node hosting phd. When not
# using phd, they should be run from some other independent host
# (such as the puppet master)
#
# - target=$PHD_ENV_nodes{N}
# The commands are executed on the Nth node provided.
# For example, to run on only the first node would be target=$PHD_ENV_nodes1
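#
# A complete block scoped to the first node looks like this (a minimal
# sketch; 'hostname' stands in for the real commands):
#
#   target=$PHD_ENV_nodes1
#   ....
#   hostname
#   ....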
#
# Tasks to be performed at this step include installing and configuring
# neutron-server, populating its database, and adding the service to the
# pacemaker cluster.
#################################
# Scenario Requirements Section #
#################################
= VARIABLES =
PHD_VAR_deployment
PHD_VAR_osp_configdir
PHD_VAR_network_hosts_rabbitmq
#################################
# Scenario Requirements Section #
#################################
= REQUIREMENTS =
nodes: 1
######################
# Deployment Scripts #
######################
= SCRIPTS =
target=all
....
# NOTE: once again, be careful with the vip-* / user / password
# substitutions and with local IP addresses obtained via shell expansion.
# While neutron can be replaced with nova-network, this how-to does NOT
# cover deploying nova-network in an HA fashion.
# This version of the how-to uses the ML2 plugin. Other supported plugins
# can be used; please consult the OSP documentation on how to
# configure/deploy them.
# neutron-server requires the neutron agents and vice versa.
yum install -y openstack-neutron openstack-neutron-openvswitch openstack-neutron-ml2
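# bind_host below is derived from the node's global-scope dynamic address
# on eth1; adjust the device name if your management interface differs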
openstack-config --set /etc/neutron/neutron.conf DEFAULT bind_host $(ip addr show dev eth1 scope global | grep dynamic| sed -e 's#.*inet ##g' -e 's#/.*##g')
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name services
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password neutrontest
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://vip-keystone:5000/v2.0/
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken identity_uri http://vip-keystone:35357/
openstack-config --set /etc/neutron/neutron.conf database connection mysql://neutron:neutrontest@vip-db:3306/neutron
openstack-config --set /etc/neutron/neutron.conf database max_retries -1
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_hosts ${PHD_VAR_network_hosts_rabbitmq}
openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_ha_queues true
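# rabbit_hosts expects a comma-separated list of host:port pairs,
# e.g. rabbit1:5672,rabbit2:5672 (hypothetical hostnames)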
openstack-config --set /etc/neutron/neutron.conf DEFAULT notification_driver neutron.openstack.common.notifier.rpc_notifier
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_url http://vip-nova:8774/v2
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://vip-keystone:35357/
openstack-config --set /etc/neutron/neutron.conf nova auth_plugin password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_id default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_id default
openstack-config --set /etc/neutron/neutron.conf nova region_name regionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name services
openstack-config --set /etc/neutron/neutron.conf nova username compute
openstack-config --set /etc/neutron/neutron.conf nova password novatest
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin neutron.plugins.ml2.plugin.Ml2Plugin
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
openstack-config --set /etc/neutron/neutron.conf DEFAULT router_scheduler_driver neutron.scheduler.l3_agent_scheduler.ChanceScheduler
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers local,gre,flat,vxlan,vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks "*"
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_gre tunnel_id_ranges 10:10000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 10:10000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vxlan_group 224.0.0.1
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_security_group True
# firewall_driver expects a driver class rather than a boolean; the OVS
# hybrid iptables driver is the usual choice for ML2 with openvswitch
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
ln -sf /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
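# With the symlink in place, /etc/neutron/plugin.ini now carries, among
# other sections, an [ml2] block along these lines:
#
#   [ml2]
#   type_drivers = local,gre,flat,vxlan,vlan
#   tenant_network_types = vxlan
#   mechanism_drivers = openvswitch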
# There are a number of different approaches to making neutron highly
# available; we cover only A/A and A/P here, but more details
# (internal-only) are available in Bugzilla:
# https://bugzilla.redhat.com/show_bug.cgi?id=1170113#c18
#
# 1) Fully neutron A/A, considering nodes A,B,C
#
# all nodes would have l3_ha=True, max_l3_agents_per_router=3, min_l3_agents_per_router=2
# A: host=neutron-n-0 B: host=neutron-n-1 C: host=neutron-n-2
#
# (this way we cover the upgrade path from OSP5->OSP6, by keeping at least one host with the old neutron-n-0 ID)
#
# 3) A/P, with 1 active node
#
# all nodes would have l3_ha=False
#
# a) A + B + C have host=neutron-n-0
# b) like case 2, but:
#    A: host=neutron-n-0 B: (passive, not set) C: (passive, not set)
#    and neutron scale does the host= change during failover.
# A/P does NOT require extra settings; the defaults are fine.
# Fully A/A requires extra neutron-server configuration:
openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha True
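# max_l3_agents_per_router=0 removes the upper bound, i.e. HA routers are
# scheduled on every eligible L3 agent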
openstack-config --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 0
openstack-config --set /etc/neutron/neutron.conf DEFAULT min_l3_agents_per_router 2
# This value _MUST_ match the number of nodes in the pacemaker cluster
openstack-config --set /etc/neutron/neutron.conf DEFAULT dhcp_agents_per_network 3
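# once networks and routers exist, the HA scheduling can be checked with
# e.g. 'neutron l3-agent-list-hosting-router <router>' and
# 'neutron dhcp-agent-list-hosting-net <network>'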
....
target=$PHD_ENV_nodes1
....
. ${PHD_VAR_osp_configdir}/keystonerc_admin
# required when the database has been installed manually: bring the
# neutron schema up to the kilo revision
neutron-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade kilo
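# start the service once so it comes up cleanly outside the cluster, then
# stop it again: pacemaker must be the only thing managing neutron-server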
systemctl start neutron-server
systemctl stop neutron-server
# add the service to pacemaker
pcs resource create neutron-server systemd:neutron-server op start timeout=90 --clone interleave=true
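# the new neutron-server-clone set should now appear in 'pcs status' output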
if [ "${PHD_VAR_deployment}" = "collapsed" ]; then
    pcs constraint order start keystone-clone then neutron-server-clone
fi
....