Skip to content

Commit

Permalink
Add possibility to deploy OpenShift in vagrant machine
Browse files Browse the repository at this point in the history
Signed-off-by: Lukianov Artyom <[email protected]>
  • Loading branch information
Lukianov Artyom committed Jan 24, 2018
1 parent 07a16aa commit 09235bd
Show file tree
Hide file tree
Showing 7 changed files with 117 additions and 43 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ bin/*
.vagrant
cluster/vagrant/.kubeconfig
cluster/vagrant/.kubectl
cluster/vagrant/.oc
cluster/.console.vv
build-tools/desc/desc
hack/config-local.sh
Expand Down
69 changes: 31 additions & 38 deletions Vagrantfile
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ Calling 'vagrant up' directly is not supported. Instead, please run the followi
END
end

$deploy_openshift = ENV['DEPLOY_OPENSHIFT'] == 'true'
$use_nfs = ENV['VAGRANT_USE_NFS'] == 'true'
$use_rng = ENV['VAGRANT_USE_RNG'] == 'true'
$cache_docker = ENV['VAGRANT_CACHE_DOCKER'] == 'true'
Expand Down Expand Up @@ -70,60 +71,52 @@ Vagrant.configure(2) do |config|
rsync__args: ["--archive", "--delete"]
end

config.vm.provision "shell", inline: <<-SHELL
#!/bin/bash
set -xe
sed -i -e "s/PasswordAuthentication no/PasswordAuthentication yes/" /etc/ssh/sshd_config
systemctl restart sshd
# FIXME, sometimes eth1 does not come up on Vagrant on latest fc26
sudo ifup eth1
SHELL
config.vm.provision "shell" do |s|
s.path = "cluster/vagrant/setup_common.sh"
s.args = ["#{$master_ip}", "#{$nodes}"]
end

config.vm.define "master" do |master|
master.vm.hostname = "master"
master.vm.network "private_network", ip: "#{$master_ip}", libvirt__network_name: $libvirt_prefix + "0"
master.vm.provider :libvirt do |domain|
domain.memory = 3000
if $cache_docker then
domain.storage :file, :size => '10G', :path => $libvirt_prefix.to_s + '_master_docker.img', :allow_existing => true
domain.storage :file, :size => '10G', :path => $libvirt_prefix.to_s + '_master_docker.img', :allow_existing => true
end
end

master.vm.provision "shell", inline: <<-SHELL
#!/bin/bash
set -xe
export MASTER_IP=#{$master_ip}
export WITH_LOCAL_NFS=true
export NETWORK_PROVIDER=#{$network_provider}
cd /vagrant/cluster/vagrant
bash setup_kubernetes_master.sh
set +x
echo -e "\033[0;32m Deployment was successful!"
echo -e "Cockpit is accessible at https://#{$master_ip}:9090."
echo -e "Credentials for Cockpit are 'root:vagrant'.\033[0m"
SHELL
master.vm.provision "shell" do |s|
if $deploy_openshift then
s.path = "cluster/vagrant/setup_openshift_master.sh"
else
s.path = "cluster/vagrant/setup_kubernetes_master.sh"
end
s.args = ["#{$master_ip}", "#{$nodes}", "#{$network_provider}"]
end
end

(0..($nodes-1)).each do |suffix|
config.vm.define "node" + suffix.to_s do |node|
node.vm.hostname = "node" + suffix.to_s
node.vm.network "private_network", ip: $master_ip[0..-2] + ($master_ip[-1].to_i + 1 + suffix).to_s, libvirt__network_name: $libvirt_prefix + "0"
node.vm.provider :libvirt do |domain|
domain.memory = 2048
if $cache_docker then
domain.storage :file, :size => '10G', :path => $libvirt_prefix.to_s + '_node_docker' + suffix.to_s + '.img', :allow_existing => true
end
node.vm.hostname = "node" + suffix.to_s
node_ip = $master_ip[0..-2] + ($master_ip[-1].to_i + 1 + suffix).to_s
node.vm.network "private_network", ip: node_ip, libvirt__network_name: $libvirt_prefix + "0"

node.vm.provider :libvirt do |domain|
domain.memory = 2048
if $cache_docker then
domain.storage :file, :size => '10G', :path => $libvirt_prefix.to_s + '_node_docker' + suffix.to_s + '.img', :allow_existing => true
end
end

node.vm.provision "shell", inline: <<-SHELL
#!/bin/bash
set -xe
export MASTER_IP=#{$master_ip}
cd /vagrant/cluster/vagrant
bash setup_kubernetes_node.sh
set +x
echo -e "\033[0;32m Deployment was successful!\033[0m"
SHELL
node.vm.provision "shell" do |s|
if $deploy_openshift then
s.path = "cluster/vagrant/setup_openshift_common.sh"
else
s.path = "cluster/vagrant/setup_kubernetes_node.sh"
end
s.args = ["#{$master_ip}"]
end
end
end
end
16 changes: 16 additions & 0 deletions cluster/vagrant/setup_common.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#!/bin/bash
#
# Common provisioning for every Vagrant VM (master and nodes):
# enables SSH password authentication, works around a flaky eth1,
# and seeds /etc/hosts with the master and all node addresses.
#
# Arguments:
#   $1 - IP address of the master VM
#   $2 - number of node VMs
set -e

master_ip=$1
nodes=$2

# Vagrant boxes ship with password auth disabled; the cluster scripts
# rely on sshpass with root:vagrant between the VMs.
sed -i -e "s/PasswordAuthentication no/PasswordAuthentication yes/" /etc/ssh/sshd_config
systemctl restart sshd

# FIXME, sometimes eth1 does not come up on Vagrant on latest fc26
# (|| true: ifup may fail when the interface is already up).
sudo ifup eth1 || true

# Drop any stale entry for this host, then register the master and all
# nodes. Node IPs are allocated sequentially after the master's IP.
sed -i "/$(hostname)/d" /etc/hosts
grep -q 'master' /etc/hosts || echo "$master_ip master" >> /etc/hosts
IFS=. read -r ip1 ip2 ip3 ip4 <<< "$master_ip"
for node in $(seq 0 $((nodes - 1))); do
  node_hostname="node$node"
  node_ip="$ip1.$ip2.$ip3.$((ip4 + node + 1))"
  grep -q "$node_hostname" /etc/hosts || echo "$node_ip $node_hostname" >> /etc/hosts
done
12 changes: 9 additions & 3 deletions cluster/vagrant/setup_kubernetes_master.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,11 @@
#
# Copyright 2017 Red Hat, Inc.
#
master_ip=$1
network_provider=$3

export KUBERNETES_MASTER=true
bash ./setup_kubernetes_common.sh
bash /vagrant/cluster/vagrant/setup_kubernetes_common.sh

# Cockpit with kubernetes plugin
yum install -y cockpit cockpit-kubernetes
Expand All @@ -44,11 +46,11 @@ done

set -e

if [ "$NETWORK_PROVIDER" == "weave" ]; then
if [ "$network_provider" == "weave" ]; then
kubever=$(kubectl version | base64 | tr -d '\n')
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
else
kubectl create -f kube-$NETWORK_PROVIDER.yaml
kubectl create -f kube-$network_provider.yaml
fi

# Allow scheduling pods on master
Expand All @@ -63,3 +65,7 @@ chown 36:36 /exports/share1
echo "/exports/share1 *(rw,anonuid=36,anongid=36,all_squash,sync,no_subtree_check)" >/etc/exports

systemctl enable nfs-server && systemctl start nfs-server

echo -e "\033[0;32m Deployment was successful!"
echo -e "Cockpit is accessible at https://$master_ip:9090."
echo -e "Credentials for Cockpit are 'root:vagrant'.\033[0m"
7 changes: 5 additions & 2 deletions cluster/vagrant/setup_kubernetes_node.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,11 @@
#
# Copyright 2017 Red Hat, Inc.
#
master_ip=$1

bash ./setup_kubernetes_common.sh
bash /vagrant/cluster/vagrant/setup_kubernetes_common.sh

ADVERTISED_MASTER_IP=$(sshpass -p vagrant ssh -oStrictHostKeyChecking=no vagrant@$MASTER_IP hostname -I | cut -d " " -f1)
ADVERTISED_MASTER_IP=$(sshpass -p vagrant ssh -oStrictHostKeyChecking=no vagrant@$master_ip hostname -I | cut -d " " -f1)
set +e

echo 'Trying to register myself...'
Expand All @@ -31,3 +32,5 @@ while [ $? -ne 0 ]; do
# Skipping preflight checks because of https://github.com/kubernetes/kubeadm/issues/6
kubeadm join --token abcdef.1234567890123456 $ADVERTISED_MASTER_IP:6443 --ignore-preflight-errors=all --discovery-token-unsafe-skip-ca-verification=true
done

echo -e "\033[0;32m Deployment was successful!\033[0m"
6 changes: 6 additions & 0 deletions cluster/vagrant/setup_openshift_common.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#!/bin/bash
#
# Installs the prerequisites for an OpenShift Origin deployment on a
# CentOS Vagrant VM: the Origin repo, base tooling, Docker, and the
# openshift-ansible installer.
set -e

# Repository providing the OpenShift Origin packages.
yum install -y centos-release-openshift-origin

# Base utilities required by the openshift-ansible preflight checks.
yum install -y wget git net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct docker

# Docker must be running (and survive reboots) before the install starts.
systemctl start docker
systemctl enable docker

yum -y update

# atomic-openshift-utils ships the openshift-ansible playbooks used by
# setup_openshift_master.sh.
yum --enablerepo=centos-openshift-origin-testing install -y atomic-openshift-utils
49 changes: 49 additions & 0 deletions cluster/vagrant/setup_openshift_master.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/bin/bash
#
# Provisions the master VM as an OpenShift Origin cluster using the
# openshift-ansible BYO playbook, then creates an 'admin' user with
# cluster-admin rights.
#
# Arguments:
#   $1 - IP address of the master VM
#   $2 - number of node VMs
set -e

master_ip=$1
num_nodes=$2

bash /vagrant/cluster/vagrant/setup_openshift_common.sh

# Ansible connects over SSH with password auth; disable host key checking
# so the first connection to each node does not prompt.
sed -i '/host_key_checking/s/^#//g' /etc/ansible/ansible.cfg

# Build the [nodes] inventory entries. Node IPs are allocated sequentially
# after the master's IP (must match cluster/vagrant/setup_common.sh).
# NB: join entries with a real newline via $'\n' — a literal "\n" inside
# double quotes is NOT expanded by the here-doc and would collapse all
# node entries onto a single inventory line.
IFS=. read -r ip1 ip2 ip3 ip4 <<< "$master_ip"
node_entries=""
for node in $(seq 0 $((num_nodes - 1))); do
  node_ip="$ip1.$ip2.$ip3.$((ip4 + node + 1))"
  node_entries="${node_entries}node$node openshift_node_labels=\"{'region': 'infra','zone': 'default'}\" openshift_ip=$node_ip"$'\n'
done

cat > inventory <<EOF
[OSEv3:children]
masters
nodes
[OSEv3:vars]
ansible_ssh_user=root
ansible_ssh_pass=vagrant
openshift_deployment_type=origin
openshift_clock_enabled=true
openshift_master_identity_providers=[{'name': 'allow_all_auth', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
openshift_disable_check=memory_availability,disk_availability,docker_storage
openshift_repos_enable_testing=True
[masters]
master openshift_ip=$master_ip
[etcd]
master openshift_ip=$master_ip
[nodes]
master openshift_node_labels="{'region': 'infra','zone': 'default'}" openshift_schedulable=true openshift_ip=$master_ip
$node_entries
EOF

ansible-playbook -i inventory /usr/share/ansible/openshift-ansible/playbooks/byo/config.yml

# Create an 'admin' user. The allow_all_auth provider accepts any
# password, so cluster-admin must be granted explicitly.
oc create user admin
oc create identity allow_all_auth:admin
oc create useridentitymapping allow_all_auth:admin admin
oadm policy add-cluster-role-to-user cluster-admin admin

echo -e "\033[0;32m Deployment was successful!\033[0m"

0 comments on commit 09235bd

Please sign in to comment.