File tree Expand file tree Collapse file tree 11 files changed +127
-1
lines changed
inventory/sample/group_vars Expand file tree Collapse file tree 11 files changed +127
-1
lines changed Original file line number Diff line number Diff line change @@ -51,6 +51,9 @@ extra_agent_args: ""
5151
5252kube_vip_tag_version : " "
5353
54+ kube_vip_cloud_provider_tag_version : " "
55+ kube_vip_lb_ip_range : " "
56+
5457metal_lb_speaker_tag_version : " "
5558metal_lb_controller_tag_version : " "
5659
Original file line number Diff line number Diff line change 1313 - ipv6
1414 - single_node
1515 - calico
16+ - kube-vip
1617 fail-fast : false
1718 env :
1819 PYTHON_VERSION : " 3.11"
Original file line number Diff line number Diff line change @@ -56,6 +56,13 @@ extra_agent_args: >-
5656# image tag for kube-vip
5757kube_vip_tag_version : " v0.6.4"
5858
59+ # tag for kube-vip-cloud-provider manifest
60+ # kube_vip_cloud_provider_tag_version: "main"
61+
62+ # kube-vip ip range for load balancer
63+ # (uncomment to use kube-vip for services instead of MetalLB)
64+ # kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
65+
5966# metallb type frr or native
6067metal_lb_type : " native"
6168
Original file line number Diff line number Diff line change @@ -15,6 +15,8 @@ We have these scenarios:
1515 Very similar to the default scenario, but uses only a single node for all cluster functionality.
1616- ** calico** :
1717 The same as single node, but uses calico cni instead of flannel.
18+ - ** kube-vip** :
19+ The same as single node, but uses kube-vip as the service load balancer instead of MetalLB.
1820
1921## How to execute
2022
---
# Molecule scenario: single-node cluster using kube-vip as the service
# load balancer (instead of MetalLB). Mirrors the "single_node" scenario.
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
---
# Per-scenario host-variable overrides applied before converge.
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: "192.168.30.225"
        # Defining kube_vip_lb_ip_range switches the service load balancer
        # from MetalLB to kube-vip (see the k3s_server deploy tasks).
        kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"
---
# Deploy the kube-vip cloud provider and its ConfigMap onto the first master,
# using the k3s auto-deploying manifests directory.
- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    # Directories need the execute bit to be traversable; 0644 would make
    # the manifests inside inaccessible. Quote the mode so YAML does not
    # parse it as an octal/decimal integer.
    mode: "0755"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip cloud provider manifest to first master
  ansible.builtin.get_url:
    url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml"  # noqa yaml[line-length]
    dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy kubevip configMap manifest to first master
  ansible.builtin.template:
    src: "kubevip.yaml.j2"
    dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
Original file line number Diff line number Diff line change 2929- name : Deploy metallb manifest
3030 include_tasks : metallb.yml
3131 tags : metallb
32+ when : kube_vip_lb_ip_range is not defined
33+
34+ - name : Deploy kube-vip manifest
35+ include_tasks : kube-vip.yml
36+ tags : kubevip
37+ when : kube_vip_lb_ip_range is defined
3238
3339- name : Init cluster inside the transient k3s-init service
3440 command :
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to a list with a single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
  range-global: "{{ kube_vip_lb_ip_range | join(',') }}"
Original file line number Diff line number Diff line change 4343 - name: vip_ddns
4444 value: "false"
4545 - name: svc_enable
46- value: "false"
46+ value: "{{ 'true' if kube_vip_lb_ip_range is defined else ' false' }} "
4747 - name: vip_leaderelection
4848 value: "true"
4949 - name: vip_leaseduration
You can’t perform that action at this time.
0 commit comments