forked from metal3-io/metal3-dev-env
-
Notifications
You must be signed in to change notification settings - Fork 0
/
02_configure_host.sh
executable file
·306 lines (268 loc) · 10.9 KB
/
02_configure_host.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
#!/usr/bin/env bash
# 02_configure_host.sh — configure the metal3-dev-env virtualization host:
# libvirt VMs/pools, NetworkManager bridges, firewall rules, a local image
# registry, and the infra containers (httpd, vbmc, sushy-tools).
# Expects the environment provided by the sourced lib/*.sh files
# (WORKING_DIR, NUM_NODES, CONTAINER_RUNTIME, ...).
set -xe
# shellcheck disable=SC1091
source lib/logging.sh
# shellcheck disable=SC1091
source lib/common.sh
# shellcheck disable=SC1091
source lib/network.sh
# shellcheck disable=SC1091
source lib/releases.sh
# Root needs a private key to talk to libvirt
# See tripleo-quickstart-config/roles/virtbmc/tasks/configure-vbmc.yml
# Note: `sudo [` runs the file test as root, since /root/.ssh is not
# readable by the invoking user.
if sudo [ ! -f /root/.ssh/id_rsa_virt_power ]; then
sudo ssh-keygen -f /root/.ssh/id_rsa_virt_power -P ""
# Authorize the new key for root logins on this host (append, keep existing keys).
sudo cat /root/.ssh/id_rsa_virt_power.pub | sudo tee -a /root/.ssh/authorized_keys
fi
# Create the test VMs, volumes and supporting virtual infrastructure via
# Ansible. All -e variables are passed straight through from the environment
# established by the sourced lib/*.sh files.
ANSIBLE_FORCE_COLOR=true ansible-playbook \
-e "working_dir=$WORKING_DIR" \
-e "num_nodes=$NUM_NODES" \
-e "extradisks=$VM_EXTRADISKS" \
-e "virthost=$HOSTNAME" \
-e "platform=$NODES_PLATFORM" \
-e "libvirt_firmware=$LIBVIRT_FIRMWARE" \
-e "libvirt_secure_boot=$LIBVIRT_SECURE_BOOT" \
-e "default_memory=$DEFAULT_HOSTS_MEMORY" \
-e "manage_baremetal=$MANAGE_BR_BRIDGE" \
-e "provisioning_url_host=$PROVISIONING_URL_HOST" \
-e "nodes_file=$NODES_FILE" \
-e "node_hostname_format=$NODE_HOSTNAME_FORMAT" \
-i vm-setup/inventory.ini \
-b vm-setup/setup-playbook.yml
# Usually virt-manager/virt-install creates this: https://www.redhat.com/archives/libvir-list/2008-August/msg00179.html
# If the "default" storage pool is missing (pool-uuid fails), define it
# pointing at the stock libvirt images directory, then start and autostart it.
if ! sudo virsh pool-uuid default > /dev/null 2>&1 ; then
sudo virsh pool-define /dev/stdin <<EOF
<pool type='dir'>
<name>default</name>
<target>
<path>/var/lib/libvirt/images</path>
</target>
</pool>
EOF
sudo virsh pool-start default
sudo virsh pool-autostart default
fi
# Host network setup. Ubuntu delegates to helper scripts; other distros
# (NetworkManager-based) get the provisioning/baremetal bridges configured
# directly via keyfiles below.
if [[ $OS == ubuntu ]]; then
# source ubuntu_bridge_network_configuration.sh
# shellcheck disable=SC1091
source ubuntu_bridge_network_configuration.sh
# shellcheck disable=SC1091
source disable_apparmor_driver_libvirtd.sh
else
if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
# Adding an IP address in the libvirt definition for this network results in
# dnsmasq being run, we don't want that as we have our own dnsmasq, so set
# the IP address here
if [ ! -e /etc/NetworkManager/system-connections/provisioning.nmconnection ] ; then
# Write the provisioning bridge keyfile — IPv6 or IPv4 flavour as configured.
if [[ "${PROVISIONING_IPV6}" == "true" ]]; then
sudo tee -a /etc/NetworkManager/system-connections/provisioning.nmconnection <<EOF
[connection]
id=provisioning
type=bridge
interface-name=provisioning
[bridge]
stp=false
[ipv4]
method=disabled
[ipv6]
addr-gen-mode=eui64
address1=$PROVISIONING_IP/$PROVISIONING_CIDR
method=manual
EOF
else
sudo tee -a /etc/NetworkManager/system-connections/provisioning.nmconnection <<EOF
[connection]
id=provisioning
type=bridge
interface-name=provisioning
[bridge]
stp=false
[ipv4]
address1=$PROVISIONING_IP/$PROVISIONING_CIDR
method=manual
[ipv6]
addr-gen-mode=eui64
method=disabled
EOF
fi
# NetworkManager refuses keyfiles that are not root-only (0600).
sudo chmod 600 /etc/NetworkManager/system-connections/provisioning.nmconnection
sudo nmcli con load /etc/NetworkManager/system-connections/provisioning.nmconnection
fi
sudo nmcli con up provisioning
# Need to pass the provision interface for bare metal
if [ "$PRO_IF" ]; then
# Enslave the physical provisioning NIC to the provisioning bridge.
sudo tee -a /etc/NetworkManager/system-connections/"$PRO_IF".nmconnection <<EOF
[connection]
id=$PRO_IF
type=ethernet
interface-name=$PRO_IF
master=provisioning
slave-type=bridge
EOF
sudo chmod 600 /etc/NetworkManager/system-connections/"$PRO_IF".nmconnection
sudo nmcli con load /etc/NetworkManager/system-connections/"$PRO_IF".nmconnection
sudo nmcli con up "$PRO_IF"
fi
# Create the baremetal bridge connection once; the `nmcli con show` check
# keeps this idempotent across re-runs.
if [ "$MANAGE_INT_BRIDGE" == "y" ]; then
if [[ "$(nmcli con show)" != *"baremetal"* ]]; then
sudo tee /etc/NetworkManager/system-connections/baremetal.nmconnection <<EOF
[connection]
id=baremetal
type=bridge
interface-name=baremetal
autoconnect=true
[bridge]
stp=false
[ipv6]
addr-gen-mode=stable-privacy
method=ignore
EOF
sudo chmod 600 /etc/NetworkManager/system-connections/baremetal.nmconnection
sudo nmcli con load /etc/NetworkManager/system-connections/baremetal.nmconnection
fi
fi
sudo nmcli connection up baremetal
# Add the internal interface to the baremetal bridge if requested. This may
# also be the interface providing external access, so we need to make sure
# we maintain DHCP config on the bridge if it is available.
if [ "$INT_IF" ]; then
  # BUGFIX: the internal NIC must be enslaved to the *baremetal* bridge
  # (previously master=provisioning, copy-pasted from the PRO_IF block).
  # The DHCP probe below only makes sense if this NIC feeds baremetal.
  sudo tee /etc/NetworkManager/system-connections/"$INT_IF".nmconnection <<EOF
[connection]
id=$INT_IF
type=ethernet
interface-name=$INT_IF
master=baremetal
slave-type=bridge
EOF
  # NetworkManager refuses keyfiles that are not root-only (0600).
  sudo chmod 600 /etc/NetworkManager/system-connections/"$INT_IF".nmconnection
  sudo nmcli con load /etc/NetworkManager/system-connections/"$INT_IF".nmconnection
  # If a DHCP server answers on this interface, let the baremetal bridge
  # obtain its address via DHCP rather than staying static.
  if sudo nmap --script broadcast-dhcp-discover -e "$INT_IF" | grep "IP Offered" ; then
    sudo nmcli connection modify baremetal ipv4.method auto
  fi
  sudo nmcli connection up "$INT_IF"
fi
fi # end MANAGE_PRO_BRIDGE
# Restart the libvirt network so it applies an ip to the bridge
if [ "$MANAGE_BR_BRIDGE" == "y" ] ; then
sudo virsh net-destroy baremetal
sudo virsh net-start baremetal
if [ "$INT_IF" ]; then #Need to bring UP the NIC after destroying the libvirt network
sudo nmcli connection up "$INT_IF"
fi
fi
fi # end non-ubuntu (NetworkManager) path
# Open the required ports via the firewall playbook (iptables or firewalld
# depending on USE_FIREWALLD).
ANSIBLE_FORCE_COLOR=true ansible-playbook \
-e "{use_firewalld: $USE_FIREWALLD}" \
-e "external_subnet_v4: ${EXTERNAL_SUBNET_V4}" \
-i vm-setup/inventory.ini \
-b vm-setup/firewall.yml
# FIXME(stbenjam): ansbile firewalld module doesn't seem to be doing the right thing
# Work around it by moving both bridges into the libvirt zone directly.
if [ "$USE_FIREWALLD" == "True" ]; then
sudo firewall-cmd --zone=libvirt --change-interface=provisioning
sudo firewall-cmd --zone=libvirt --change-interface=baremetal
fi
# Need to route traffic from the provisioning host: masquerade egress through
# the external interface and allow forwarding from the baremetal bridge.
if [ "$EXT_IF" ]; then
  # Use `iptables -C` (check) before appending so re-running this script does
  # not accumulate duplicate MASQUERADE/ACCEPT rules. -C exits non-zero (and
  # prints an error we discard) when the rule does not exist yet.
  sudo iptables -t nat -C POSTROUTING --out-interface "$EXT_IF" -j MASQUERADE 2>/dev/null || \
    sudo iptables -t nat -A POSTROUTING --out-interface "$EXT_IF" -j MASQUERADE
  sudo iptables -C FORWARD --in-interface baremetal -j ACCEPT 2>/dev/null || \
    sudo iptables -A FORWARD --in-interface baremetal -j ACCEPT
fi
# Local registry for images.
# Determine the current state of the "registry" container; any inspect
# failure (e.g. the container does not exist yet) maps to the sentinel "error".
reg_state=$(sudo "$CONTAINER_RUNTIME" inspect registry --format "{{.State.Status}}" || echo "error")
case "$reg_state" in
  running)
    # Registry already serving — nothing to do.
    ;;
  exited)
    # ubuntu_install_requirements.sh script restarts docker daemon which causes
    # local registry container to be in exited state — simply start it again.
    sudo "${CONTAINER_RUNTIME}" start registry
    ;;
  *)
    # Missing or broken: remove any leftover container and launch a fresh one.
    sudo "${CONTAINER_RUNTIME}" rm registry -f || true
    sudo "${CONTAINER_RUNTIME}" run -d -p "${REGISTRY_PORT}":5000 --name registry "$DOCKER_REGISTRY_IMAGE"
    ;;
esac
# Give the registry a moment to come up before images are pushed to it.
sleep 5
# Clone all needed repositories (CAPI, CAPM3, BMO, IPAM)
# clone_repo (from lib/releases.sh) checks out the given branch/commit into
# the given path under M3PATH.
mkdir -p "${M3PATH}"
clone_repo "${BMOREPO}" "${BMOBRANCH}" "${BMOPATH}" "${BMOCOMMIT}"
clone_repo "${CAPM3REPO}" "${CAPM3BRANCH}" "${CAPM3PATH}" "${CAPM3COMMIT}"
clone_repo "${IPAMREPO}" "${IPAMBRANCH}" "${IPAMPATH}" "${IPAMCOMMIT}"
clone_repo "${CAPIREPO}" "${CAPIBRANCH}" "${CAPIPATH}" "${CAPICOMMIT}"
# The ironic-image repo is only needed when ironic is built locally.
if [[ ${IRONIC_FROM_SOURCE:-} == "true" || ${BUILD_IRONIC_IMAGE_LOCALLY:-} == "true" ]]; then
clone_repo "${IRONIC_IMAGE_REPO}" "${IRONIC_IMAGE_BRANCH}" "${IRONIC_IMAGE_PATH}" "${IRONIC_IMAGE_COMMIT}"
fi
# Pushing images to local registry.
# Every environment variable matching *_IMAGE (but not *_LOCAL_IMAGE) names an
# upstream container image: retag each into ${REGISTRY}/localimages/ and push.
for IMAGE_VAR in $(env | grep -v "_LOCAL_IMAGE=" | grep "_IMAGE=" | grep -o "^[^=]*") ; do
  IMAGE="${!IMAGE_VAR}"
  # Keep only the final path component (image name, including any tag).
  IMAGE_NAME="${IMAGE##*/}"
  LOCAL_IMAGE="${REGISTRY}/localimages/${IMAGE_NAME}"
  sudo "${CONTAINER_RUNTIME}" tag "${IMAGE}" "${LOCAL_IMAGE}"
  # podman must be told to skip TLS verification for the local registry
  # (presumably plain HTTP — it is started above without certificates).
  push_opts=()
  if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
    push_opts=(--tls-verify=false)
  fi
  sudo "${CONTAINER_RUNTIME}" push "${push_opts[@]}" "${LOCAL_IMAGE}"
done
# Support for building local images
# Each *_LOCAL_IMAGE variable points at a local source checkout. For each one:
# build the image there, rewrite the variable (via indirect export) to its
# local-registry reference, and push it.
for IMAGE_VAR in $(env | grep "_LOCAL_IMAGE=" | grep -o "^[^=]*") ; do
IMAGE="${!IMAGE_VAR}"
cd "${IMAGE}" || exit
# Indirect assignment: first set the var to the checkout's basename...
#shellcheck disable=SC2086
export $IMAGE_VAR="${IMAGE##*/}"
# ...then prefix it with the local registry path.
#shellcheck disable=SC2086
export $IMAGE_VAR="${REGISTRY}/localimages/${!IMAGE_VAR}"
# Tag images with git hash + build date; "nogit" if the dir is not a repo.
IMAGE_GIT_HASH="$(git rev-parse --short HEAD || echo "nogit")"
# [year]_[day]_[hour][minute]
IMAGE_DATE="$(date -u +%y_%j_%H%M)"
# Support building ironic-image from source
if [[ "${IMAGE}" =~ "ironic" ]] && [[ ${IRONIC_FROM_SOURCE:-} == "true" ]]; then
sudo "${CONTAINER_RUNTIME}" build --build-arg INSTALL_TYPE=source -t "${!IMAGE_VAR}:latest" -t "${!IMAGE_VAR}:${IMAGE_GIT_HASH}_${IMAGE_DATE}" . -f ./Dockerfile
elif [[ "${IMAGE}" =~ "cluster-api" ]]; then
# CAPI needs its Go builder image; parse GO_VERSION and GO_CONTAINER_IMAGE
# out of the checkout's Makefile and combine them into the builder tag.
CAPI_GO_VERSION=$(grep "GO_VERSION ?= [0-9].*" Makefile | sed -e 's/GO_VERSION ?= //g')
#shellcheck disable=SC2016
CAPI_BASEIMAGE=$(grep "GO_CONTAINER_IMAGE ?=" Makefile | sed -e 's/GO_CONTAINER_IMAGE ?= //g' -e 's/$(GO_VERSION)//g')
CAPI_TAGGED_BASE_IMAGE="$CAPI_BASEIMAGE$CAPI_GO_VERSION"
sudo DOCKER_BUILDKIT=1 "${CONTAINER_RUNTIME}" build --build-arg builder_image="$CAPI_TAGGED_BASE_IMAGE" --build-arg ARCH="amd64" \
-t "${!IMAGE_VAR}:latest" -t "${!IMAGE_VAR}:${IMAGE_GIT_HASH}_${IMAGE_DATE}" . -f ./Dockerfile
else
sudo "${CONTAINER_RUNTIME}" build -t "${!IMAGE_VAR}" . -f ./Dockerfile
fi
cd - || exit
# Push to the local registry; podman needs TLS verification disabled.
if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
sudo "${CONTAINER_RUNTIME}" push --tls-verify=false "${!IMAGE_VAR}"
else
sudo "${CONTAINER_RUNTIME}" push "${!IMAGE_VAR}"
fi
done
# Prefer locally-built images (rewritten to registry refs by the build loop
# above) over the upstream defaults, when present.
IRONIC_IMAGE=${IRONIC_LOCAL_IMAGE:-$IRONIC_IMAGE}
VBMC_IMAGE=${VBMC_LOCAL_IMAGE:-$VBMC_IMAGE}
SUSHY_TOOLS_IMAGE=${SUSHY_TOOLS_LOCAL_IMAGE:-$SUSHY_TOOLS_IMAGE}
# Start httpd container
# On ubuntu it runs --privileged; POD_NAME_INFRA is intentionally unquoted so
# it can expand to zero or more extra runtime options.
if [[ $OS == ubuntu ]]; then
#shellcheck disable=SC2086
sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name httpd-infra ${POD_NAME_INFRA} \
-v "$IRONIC_DATA_DIR":/shared --entrypoint /bin/runhttpd \
--env "PROVISIONING_INTERFACE=ironicendpoint" "${IRONIC_IMAGE}"
else
#shellcheck disable=SC2086
sudo "${CONTAINER_RUNTIME}" run -d --net host --name httpd-infra ${POD_NAME_INFRA} \
-v "$IRONIC_DATA_DIR":/shared --entrypoint /bin/runhttpd \
"${IRONIC_IMAGE}"
fi
# Start vbmc and sushy containers
# Both get root's SSH keys mounted so they can reach libvirt on the host.
#shellcheck disable=SC2086
sudo "${CONTAINER_RUNTIME}" run -d --net host --name vbmc ${POD_NAME_INFRA} \
-v "$WORKING_DIR/virtualbmc/vbmc":/root/.vbmc -v "/root/.ssh":/root/ssh \
"${VBMC_IMAGE}"
#shellcheck disable=SC2086
sudo "${CONTAINER_RUNTIME}" run -d --net host --name sushy-tools ${POD_NAME_INFRA} \
-v "$WORKING_DIR/virtualbmc/sushy-tools":/root/sushy -v "/root/.ssh":/root/ssh \
"${SUSHY_TOOLS_IMAGE}"
# Installing the openstack/ironic clients on the host is optional
# if not installed, we copy a wrapper to OPENSTACKCLIENT_PATH which
# runs the clients in a container (metal3-io/ironic-client)
OPENSTACKCLIENT_PATH="${OPENSTACKCLIENT_PATH:-/usr/local/bin/openstack}"
# The pipeline succeeds only when a real client is installed somewhere OTHER
# than our wrapper path; otherwise (not installed, or only our wrapper is
# present) install/refresh the wrapper symlinks. ln -sf keeps this idempotent.
if ! command -v openstack | grep -v "${OPENSTACKCLIENT_PATH}"; then
sudo ln -sf "${SCRIPTDIR}/openstackclient.sh" "${OPENSTACKCLIENT_PATH}"
# Also expose the wrapper under the `baremetal` command name alongside it.
sudo ln -sf "${SCRIPTDIR}/openstackclient.sh" "$(dirname "$OPENSTACKCLIENT_PATH")/baremetal"
fi
# Same for the vbmc CLI when not locally installed
VBMC_PATH="${VBMC_PATH:-/usr/local/bin/vbmc}"
if ! command -v vbmc | grep -v "${VBMC_PATH}"; then
sudo ln -sf "${SCRIPTDIR}/vbmc.sh" "${VBMC_PATH}"
fi