diff --git a/all_install.yml b/all_install.yml index e94c870..a6b6896 100644 --- a/all_install.yml +++ b/all_install.yml @@ -59,7 +59,7 @@ tags: - node roles: - - { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups['masters']" } + - { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } ## master -> install common part (for all masters - and sometimes etcd when colocated with masters) - hosts: masters @@ -84,10 +84,10 @@ - role: keepalived tags: [ 'master', 'install', 'master_install', 'ha', 'keepalived'] when: - - ( groups['masters'] | length ) > 1 + - ( groups[cluster_inventory_group.masters] | length ) > 1 - ( custom.networking.masterha_type | default('vip') ) == 'vip' -- hosts: primary-master +- hosts: primary-master name: primary-master (or master in general) - it applies to both ha and non-ha become: yes become_method: sudo @@ -122,7 +122,7 @@ - install - node_install roles: - - { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups['masters']" } + - { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } ## node -> label nodes (even when master is also a node) - hosts: nodes diff --git a/all_reset.yml b/all_reset.yml index 82d8c91..43cfa51 100644 --- a/all_reset.yml +++ b/all_reset.yml @@ -73,8 +73,8 @@ tags: - node roles: - - { role: tools, task: reset, tags: [ 'reset', 'node_reset' ], when: "inventory_hostname not in groups['masters']" } - - { role: tools, task: weave_reset, tags: [ 'reset', 'node_reset', 'network_reset', 'weave_reset', 'weave' ], when: "inventory_hostname not in groups['masters']" } + - { role: tools, task: reset, tags: [ 'reset', 'node_reset' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } + - { role: tools, task: weave_reset, tags: [ 'reset', 'node_reset', 'network_reset', 'weave_reset', 'weave' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } - hosts: masters become: yes diff --git a/group_vars/all/addons.yaml b/group_vars/all/addons.yaml index 419bcd1..3bf9103 100644 --- a/group_vars/all/addons.yaml +++ b/group_vars/all/addons.yaml @@ -106,19 +106,19 @@ helm: ## DASHBOARD ### ################ ## This (v1) will be deprecated in favour of 2.0 - soon to be released -# - { name: dashboard, repo: stable/kubernetes-dashboard, namespace: kube-system, options: '--set image.repository={{ images_repo | default ("registry.k8s.io") }}/kubernetes-dashboard-{{ HOST_ARCH }} --set rbac.create=True,ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }},ingress.hosts[2]={{ groups["primary-master"][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set rbac.create=True,rbac.clusterAdminRole=True --set enableInsecureLogin=True --set enableSkipLogin=True ' } +# - { name: dashboard, repo: stable/kubernetes-dashboard, namespace: kube-system, options: '--set image.repository={{ images_repo | default ("registry.k8s.io") }}/kubernetes-dashboard-{{ HOST_ARCH }} 
--set rbac.create=True,ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }},ingress.hosts[2]={{ groups[cluster_inventory_group.primary_master][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set rbac.create=True,rbac.clusterAdminRole=True --set enableInsecureLogin=True --set enableSkipLogin=True ' } # For a learning/development --set rbac.clusterAdminRole=True with skip login and insecure might be acceptable, but not for real case scenarios!!! # For in between, one can keep: rbac.clusterReadOnlyRole=True (if bug https://github.com/helm/charts/issues/15118 was solved) # For a production, remove --set enableInsecureLogin=True --set enableSkipLogin=True --set rbac.clusterAdminRole=True # Option 2: # use the below if you are sure you don't need any auth to your dashboard, and you use k8s 1.15 or older. -# - { name: dashboard, repo: stable/kubernetes-dashboard, options: '--set rbac.create=True,ingress.enabled=True,ingress.hosts[0]={{groups["primary-master"][0]}},ingress.hosts[1]=dashboard.{{ custom.networking.dnsDomain }},image.tag=v1.8.3 --version=0.5.3' } +# - { name: dashboard, repo: stable/kubernetes-dashboard, options: '--set rbac.create=True,ingress.enabled=True,ingress.hosts[0]={{groups[cluster_inventory_group.primary_master][0]}},ingress.hosts[1]=dashboard.{{ custom.networking.dnsDomain }},image.tag=v1.8.3 --version=0.5.3' } #################### ## DASHBOARD 2.0 ### #################### - - { name: dashboard, repo: kubernetes-dashboard/kubernetes-dashboard, namespace: monitoring, options: '--set image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard --set ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }},ingress.hosts[2]={{ groups["primary-master"][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set metricsScraper.enabled=true,metricsScraper.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-metrics-scraper --set rbac.create=True,rbac.clusterReadOnlyRole=True --set protocolHttp=true --set kong.image.repository={{ images_repo | default ("docker.io") }}/kong --set kong.admin.tls.enabled=false --set api.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-api --set web.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-web --set auth.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-auth --set api.scaling.replicas=1 --set app.ingress.enabled=True,app.ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},app.ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }},app.ingress.hosts[2]={{ groups["primary-master"][0] }} --set app.scheduling.nodeSelector."node\-role\.kubernetes\.io/infra=" --set app.tolerations[0].effect=NoSchedule,app.tolerations[0].key="node-role.kubernetes.io/infra" --set 
app.tolerations[1].effect=PreferNoSchedule,app.tolerations[1].key="node-role.kubernetes.io/infra" ' } + - { name: dashboard, repo: kubernetes-dashboard/kubernetes-dashboard, namespace: monitoring, options: '--set image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard --set ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }},ingress.hosts[2]={{ groups[cluster_inventory_group.primary_master][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set metricsScraper.enabled=true,metricsScraper.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-metrics-scraper --set rbac.create=True,rbac.clusterReadOnlyRole=True --set protocolHttp=true --set kong.image.repository={{ images_repo | default ("docker.io") }}/kong --set kong.admin.tls.enabled=false --set api.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-api --set web.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-web --set auth.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-auth --set api.scaling.replicas=1 --set app.ingress.enabled=True,app.ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},app.ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }},app.ingress.hosts[2]={{ groups[cluster_inventory_group.primary_master][0] }} --set app.scheduling.nodeSelector."node\-role\.kubernetes\.io/infra=" --set app.tolerations[0].effect=NoSchedule,app.tolerations[0].key="node-role.kubernetes.io/infra" --set app.tolerations[1].effect=PreferNoSchedule,app.tolerations[1].key="node-role.kubernetes.io/infra" ' } #metricsScraper.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/metrics-scraper --set rbac.create=True,rbac.clusterReadOnlyRole=True --set protocolHttp=true' } # --version 4.0.0' } # https://github.com/kubernetes/dashboard/blob/master/aio/deploy/helm-chart/kubernetes-dashboard/Chart.yaml#L17 diff --git a/group_vars/all/global.yaml b/group_vars/all/global.yaml index 92b071c..8c78750 100644 --- a/group_vars/all/global.yaml +++ b/group_vars/all/global.yaml @@ -5,6 +5,14 @@ # ansible_python_interpreter=/usr/bin/python3 ## (the variable can also be defined per host if there is a need for mix) +## Defines mappings for the original group names that are used in various conditions. Allows using the playbook with an inventory file that also contains other hosts/groups, or even multiple clusters.
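+## For illustration only (hypothetical group names): if the inventory file also carries
+## hosts that do not belong to this cluster, the mapping can be overridden (e.g. in
+## group_vars or via --extra-vars) so lookups such as groups[cluster_inventory_group.all]
+## only see this cluster's machines:
+# cluster_inventory_group:
+#   all: 'cluster1'
+#   masters: 'cluster1-masters'
+#   nodes: 'cluster1-nodes'
+#   primary_master: 'cluster1-primary-master'
+#   secondary_masters: 'cluster1-secondary-masters'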
+cluster_inventory_group: + all: 'all' + masters: 'masters' + nodes: 'nodes' + primary_master: 'primary-master' + secondary_masters: 'secondary-masters' + ##### ## PROXY ## proxy environment variable, mainly for fetching addons diff --git a/only_nodes_only_install.yml b/only_nodes_only_install.yml index c0732a0..a3493c4 100644 --- a/only_nodes_only_install.yml +++ b/only_nodes_only_install.yml @@ -58,7 +58,7 @@ tags: - node roles: - - { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups['masters']" } + - { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } ## node -> install nodes (kubeadm join, etc) - hosts: nodes @@ -70,7 +70,7 @@ - install - node_install roles: - - { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups['masters']" } + - { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } ## node -> label nodes (even when master is also a node) - hosts: nodes diff --git a/only_secondaryMasters_only_install.yml b/only_secondaryMasters_only_install.yml index 49a47ab..bf00011 100644 --- a/only_secondaryMasters_only_install.yml +++ b/only_secondaryMasters_only_install.yml @@ -78,7 +78,7 @@ - role: keepalived tags: [ 'master', 'install', 'master_install', 'ha', 'keepalived'] when: - - ( groups['masters'] | length ) > 1 + - ( groups[cluster_inventory_group.masters] | length ) > 1 - ( custom.networking.masterha_type | default('vip') ) == 'vip' - hosts: secondary-masters diff --git a/roles/common/tasks/decide_master_name.yml b/roles/common/tasks/decide_master_name.yml index 318446a..f4937af 100644 --- a/roles/common/tasks/decide_master_name.yml +++ b/roles/common/tasks/decide_master_name.yml @@ -4,29 +4,29 @@ # https://github.com/ReSearchITEng/kubeadm-playbook/issues/81 ( https://github.com/ansible/ansible/issues/38777 ) - block: - name: by default set master name to inventory definition (no MasterHA case) - set_fact: master_name={{ groups['primary-master'][0] }} + set_fact: master_name={{ groups[cluster_inventory_group.primary_master][0] }} when: - - groups['masters'] | length == 1 + - groups[cluster_inventory_group.masters] | length == 1 - name: force use fqdn for master name (no MasterHA case) if inventory was not defined fqdn and we have to discover... - set_fact: master_name={{ hostvars[groups['primary-master'][0]]['ansible_fqdn'] }} + set_fact: master_name={{ hostvars[groups[cluster_inventory_group.primary_master][0]]['ansible_fqdn'] }} when: - custom.networking.fqdn.always or custom.networking.fqdn.master - - groups['masters'] | length == 1 - - '"." not in groups["primary-master"][0]' # meaning it was not defined with fqdn, but we would like to force fqdn (per above custom.networking.fqdn condition) + - groups[cluster_inventory_group.masters] | length == 1 + - '"." 
not in groups[cluster_inventory_group.primary_master][0]' # meaning it was not defined with fqdn, but we would like to force fqdn (per above custom.networking.fqdn condition) - name: force use fqdn for master name (MasterHA case) set_fact: master_name={{ custom.networking.masterha_fqdn }} when: - custom.networking.fqdn.always or custom.networking.fqdn.master - - groups['masters'] | length > 1 + - groups[cluster_inventory_group.masters] | length > 1 - name: force use ip for master name (MasterHA case) set_fact: master_name={{ custom.networking.masterha_ip }} when: - not custom.networking.fqdn.always - not custom.networking.fqdn.master - - groups['masters'] | length > 1 + - groups[cluster_inventory_group.masters] | length > 1 tags: - always diff --git a/roles/common/tasks/kube_config.yml b/roles/common/tasks/kube_config.yml index 7ef5f72..3e942d0 100644 --- a/roles/common/tasks/kube_config.yml +++ b/roles/common/tasks/kube_config.yml @@ -21,7 +21,7 @@ when: - ClusterConfiguration is defined - ClusterConfiguration.cloudProvider is defined - - inventory_hostname in groups['masters'] + - inventory_hostname in groups[cluster_inventory_group.masters] tags: - kubelet diff --git a/roles/non-primary-master/tasks/main.yml b/roles/non-primary-master/tasks/main.yml index 98685f2..798b9d0 100644 --- a/roles/non-primary-master/tasks/main.yml +++ b/roles/non-primary-master/tasks/main.yml @@ -62,7 +62,7 @@ # environment -> is required due to a k8s bug which makes kubeadm need internet to generate a token. setting version is not allowed # Optionally using "--config /etc/kubernetes/kubeadm-master.conf" to get rid of the message that it tries to connect to internet for version register: kubeadm_token_whash_n - delegate_to: "{{groups['primary-master'][0]}}" + delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}" run_once: yes when: - InitConfiguration is not defined or InitConfiguration.bootstrapTokens is not defined or InitConfiguration.bootstrapTokens[0].token is not defined @@ -135,7 +135,7 @@ environment: '{{env_kc}}' shell: "/usr/bin/kubeadm init phase upload-certs --upload-certs -- 2>/dev/null | tail -1 " register: kubeadm_upload_certificate_key - delegate_to: "{{groups['primary-master'][0]}}" + delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}" run_once: yes ## TODO: try to remove this, and keep the autodetermined addr: @@ -148,7 +148,7 @@ JoinConfiguration: "{{ JoinConfiguration | combine ( { 'controlPlane': { 'certificateKey': kubeadm_upload_certificate_key.stdout_lines[0] } }, recursive=True) }}" when: - - inventory_hostname in groups['secondary-masters'] + - inventory_hostname in groups[cluster_inventory_group.secondary_masters] ### Cloud Config - name: JoinConfiguration - cloudProvider merging {{ ClusterConfiguration.cloudProvider }} to the JoinConfiguration.nodeRegistration.kubeletExtraArgs @@ -240,7 +240,7 @@ # changed_when: false # - name: Check all nodes were registered -# shell: "/usr/bin/test $(kubectl get nodes --no-headers | grep -ow Ready | wc -l) >= {{ groups['nodes'] | length + groups['masters'] | length }}" +# shell: "/usr/bin/test $(kubectl get nodes --no-headers | grep -ow Ready | wc -l) >= {{ groups[cluster_inventory_group.nodes] | length + groups[cluster_inventory_group.masters] | length }}" # register: command_result # retries: 10 # delay: 3 @@ -263,7 +263,7 @@ regexp: '^export KUBECONFIG=.*' when: - shell is undefined or shell == 'bash' - - inventory_hostname in groups['nodes'] + - inventory_hostname in groups[cluster_inventory_group.nodes] - name: 
export KUBECONFIG in secondary-masters' ~/.bashrc lineinfile: @@ -274,4 +274,4 @@ regexp: '^export KUBECONFIG=.*' when: - shell is undefined or shell == 'bash' - - inventory_hostname in groups['secondary-masters'] + - inventory_hostname in groups[cluster_inventory_group.secondary_masters] diff --git a/roles/post_deploy/tasks/taints.yml b/roles/post_deploy/tasks/taints.yml index 22e7295..686b659 100644 --- a/roles/post_deploy/tasks/taints.yml +++ b/roles/post_deploy/tasks/taints.yml @@ -5,7 +5,7 @@ shell: 'kubectl taint nodes --selector {{ item.label }} {{ item.label }}:{{ item.taint }} --overwrite' with_items: "{{ taint_for_label }}" when: - - groups['all'] | length > 1 + - groups[cluster_inventory_group.all] | length > 1 tags: - taint @@ -29,7 +29,7 @@ tags: - taints when: - - groups['all'] | length == 1 + - groups[cluster_inventory_group.all] | length == 1 tags: - taints diff --git a/roles/primary-master/tasks/main.yml b/roles/primary-master/tasks/main.yml index 31adb24..0c525b2 100644 --- a/roles/primary-master/tasks/main.yml +++ b/roles/primary-master/tasks/main.yml @@ -137,8 +137,8 @@ # set_fact: # InitConfiguration: "{{ InitConfiguration | combine ( { 'localAPIEndpoint': {'advertiseAddress': ansible_default_ipv4.address } }, recursive=True) }}" when: - - groups['masters'] | length > 1 - - inventory_hostname in groups['primary-master'] + - groups[cluster_inventory_group.masters] | length > 1 + - inventory_hostname in groups[cluster_inventory_group.primary_master] ### Configuration is prepared, show it, write it, use it - name: "debug: This is the master init configuration to be used (verbosity 2 or above):" @@ -166,7 +166,7 @@ --- {{ KubeletConfiguration | to_nice_yaml }} when: - - inventory_hostname in groups['primary-master'] + - inventory_hostname in groups[cluster_inventory_group.primary_master] - name: Pull images on master using /etc/kubernetes/kubeadm-master.conf (if defined, it uses images_repo; otherwise, defaults to registry.k8s.io ) command: "kubeadm config images pull --config /etc/kubernetes/kubeadm-master.conf --kubernetes-version {{ ClusterConfiguration.kubernetesVersion }} --image-repository {{ images_repo |default ('registry.k8s.io') }}" @@ -189,18 +189,18 @@ tags: - init when: - # - groups['masters'] | length > 1 # Allow this for both HA primary and non-HA (exclude ha secondary masters) - - inventory_hostname in groups['primary-master'] + # - groups[cluster_inventory_group.masters] | length > 1 # Allow this for both HA primary and non-HA (exclude ha secondary masters) + - inventory_hostname in groups[cluster_inventory_group.primary_master] - name: kubeadm_init_primary output debug: msg="{{kubeadm_init_primary.stdout_lines}}" when: - - inventory_hostname in groups['primary-master'] + - inventory_hostname in groups[cluster_inventory_group.primary_master] - name: kubeadm_init_primary output var debug: var=kubeadm_init_primary verbosity=3 when: - - inventory_hostname in groups['primary-master'] + - inventory_hostname in groups[cluster_inventory_group.primary_master] - name: "Wait 500 seconds for primary-master to respond: {{ InitConfiguration.localAPIEndpoint.advertiseAddress | default (master_name) }}:{{ InitConfiguration.localAPIEndpoint.bindPort | default (6443) }} " #master_name @@ -213,8 +213,8 @@ tags: - init when: -# - groups['masters'] | length > 1 - - inventory_hostname in groups['primary-master'] +# - groups[cluster_inventory_group.masters] | length > 1 + - inventory_hostname in groups[cluster_inventory_group.primary_master] ### TODO: remake it in 1.15 !!! 
# - name: generate a join token on primary-master # TEMPORARY 1.14 till this is fixed: https://github.com/kubernetes/kubeadm/issues/1485 @@ -224,7 +224,7 @@ # # environment -> is required due to a k8s bug which makes kubeadm need internet to generate a token. setting version is not allowed # # Optionally using "--config /etc/kubernetes/kubeadm-master.conf" to get rid of the message that it tries to connect to internet for version # register: kubeadm_token_whash_secondarymasters - # delegate_to: "{{groups['primary-master'][0]}}" + # delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}" # run_once: yes ### TODO: remake it in 1.15 !!! @@ -242,8 +242,8 @@ # debug: var=kubeadm_join_secondary verbosity=3 # when: - # - groups['masters'] | length > 1 - # - inventory_hostname not in groups['primary-master'] + # - groups[cluster_inventory_group.masters] | length > 1 + # - inventory_hostname not in groups[cluster_inventory_group.primary_master] # tags: # - init # - init_secondary_masters @@ -284,20 +284,20 @@ dest: /etc/kubernetes/cloud-config-vsphere-secret.yaml force: yes when: - - inventory_hostname in groups['primary-master'] + - inventory_hostname in groups[cluster_inventory_group.primary_master] - name: "vpshere apply cloud-config-vsphere-secret.yaml " environment: '{{env_kc}}' command: kubectl apply -f /etc/kubernetes/cloud-config-vsphere-secret.yaml when: - - inventory_hostname in groups['primary-master'] + - inventory_hostname in groups[cluster_inventory_group.primary_master] - name: "vpshere remove cloud-config-vsphere-secret.yaml " file: path: /etc/kubernetes/cloud-config-vsphere-secret.yaml state: absent when: - - inventory_hostname in groups['primary-master'] + - inventory_hostname in groups[cluster_inventory_group.primary_master] tags: - init @@ -331,7 +331,7 @@ # KUBECONFIG: /etc/kubernetes/admin.conf # shell: 'kubectl get no -o=jsonpath="{.items[0].metadata.name}"' # when: -# - groups['all'] | length == 1 +# - groups[cluster_inventory_group.all] | length == 1 # register: result_primary_master_name # tags: # - all @@ -344,7 +344,7 @@ # #shell: 'kubectl taint nodes {{ ansible_fqdn }} {{ item }} --overwrite' # shell: 'kubectl taint nodes {{ result_primary_master_name.stdout_lines[0] }} {{ item }} --overwrite' # when: -# - groups['all'] | length == 1 +# - groups[cluster_inventory_group.all] | length == 1 # with_items: #'{{ taints_master }}' # - 'node-role.kubernetes.io/master:NoSchedule-' # - 'node-role.kubernetes.io/master=:PreferNoSchedule' @@ -359,7 +359,7 @@ # KUBECONFIG: /etc/kubernetes/admin.conf # shell: 'kubectl taint nodes {{ inventory_hostname_short }} {{ item }} --overwrite' # when: -# - groups['all'] | length == 1 +# - groups[cluster_inventory_group.all] | length == 1 # with_items: #'{{ taints_master }}' # - 'node-role.kubernetes.io/master:NoSchedule-' # - 'node-role.kubernetes.io/master=:PreferNoSchedule' @@ -374,7 +374,7 @@ # shell: 'kubectl label nodes {{ result_primary_master_name.stdout_lines[0] }} "node-role.kubernetes.io/infra=" --overwrite' # #shell: 'kubectl label nodes {{ ansible_fqdn }} "node-role.kubernetes.io/infra=" --overwrite' # when: -# - groups['all'] | length == 1 +# - groups[cluster_inventory_group.all] | length == 1 # register: command_result # changed_when: '"not labeled" not in command_result.stdout' # ignore_errors: true @@ -384,7 +384,7 @@ # KUBECONFIG: /etc/kubernetes/admin.conf # shell: 'kubectl label nodes {{ inventory_hostname_short }} "node-role.kubernetes.io/infra=" --overwrite' # when: -# - groups['all'] | length == 1 +# - 
groups[cluster_inventory_group.all] | length == 1 # register: command_result # changed_when: '"not labeled" not in command_result.stdout' # ignore_errors: true @@ -411,10 +411,10 @@ changed_when: false - name: Set coredns replicas to number of masters (a good practice; by default there are 2 coredns) - shell: "export KUBECONFIG=/etc/kubernetes/admin.conf; kubectl scale --replicas={{ groups['masters'] | length }} -n kube-system deployment/coredns" + shell: "export KUBECONFIG=/etc/kubernetes/admin.conf; kubectl scale --replicas={{ groups[cluster_inventory_group.masters] | length }} -n kube-system deployment/coredns" when: - - groups['masters'] | length > 1 - - inventory_hostname in groups['primary-master'] + - groups[cluster_inventory_group.masters] | length > 1 + - inventory_hostname in groups[cluster_inventory_group.primary_master] tags: - scale - scale_dns diff --git a/roles/tools/tasks/cluster_sanity.yml b/roles/tools/tasks/cluster_sanity.yml index fa59df9..e468167 100644 --- a/roles/tools/tasks/cluster_sanity.yml +++ b/roles/tools/tasks/cluster_sanity.yml @@ -37,7 +37,7 @@ # >= and not == because we may use this role to only to add nodes also. - name: Check all nodes were registered - shell: "/usr/bin/test $(kubectl get nodes | grep -ow Ready | wc -l) -ge {{ groups['all'] | length }}" + shell: "/usr/bin/test $(kubectl get nodes | grep -ow Ready | wc -l) -ge {{ groups[cluster_inventory_group.all] | length }}" register: command_result retries: 30 delay: 3 diff --git a/roles/tools/tasks/labels.yml b/roles/tools/tasks/labels.yml index 98bc9ea..913bc3b 100644 --- a/roles/tools/tasks/labels.yml +++ b/roles/tools/tasks/labels.yml @@ -4,14 +4,14 @@ ## The below should be 4 times, (to cover nodes fqdn, nodes nofqdn, master fqdn, master nofqdn) based on: # - not custom.networking.fqdn.always # - not custom.networking.fqdn.node -# - inventory_hostname not in groups['masters'] +# - inventory_hostname not in groups[cluster_inventory_group.masters] # As it's overcomplicating and sometimes k8s still has nodes with short (even if fqdn is true), we default to: # Due to ansible in probelmatic netw setups (https://github.com/ReSearchITEng/kubeadm-playbook/issues/81, https://github.com/ansible/ansible/issues/38777 ) # we have to add the 3rd option as well... 
- block: - name: labeling using inventory_hostname_short - {{ inventory_hostname_short }} - (this or below 2 will end with error) - delegate_to: "{{groups['primary-master'][0]}}" + delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}" environment: KUBECONFIG: /etc/kubernetes/admin.conf shell: kubectl label nodes {{ inventory_hostname_short }} {{label | default ("node-role.kubernetes.io/compute=") }} --overwrite @@ -20,7 +20,7 @@ ignore_errors: true - name: labeling using ansible_fqdn - {{ inventory_hostname }} - (this or above or below will end with error) - delegate_to: "{{groups['primary-master'][0]}}" + delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}" environment: KUBECONFIG: /etc/kubernetes/admin.conf shell: kubectl label nodes {{ inventory_hostname }} {{label | default ("node-role.kubernetes.io/compute=") }} --overwrite @@ -29,7 +29,7 @@ ignore_errors: true - name: labeling using ansible_fqdn - {{ ansible_fqdn }} - (this or one of the above 2 will end with error) - delegate_to: "{{groups['primary-master'][0]}}" + delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}" environment: KUBECONFIG: /etc/kubernetes/admin.conf shell: kubectl label nodes {{ ansible_fqdn }} {{label | default ("node-role.kubernetes.io/compute=") }} --overwrite @@ -37,7 +37,7 @@ changed_when: '"not labeled" not in command_result.stdout' ignore_errors: true when: - - inventory_hostname not in groups['masters'] + - inventory_hostname not in groups[cluster_inventory_group.masters] tags: - all @@ -57,7 +57,7 @@ changed_when: '"not labeled" not in command_result.stdout' ignore_errors: true when: - - groups['all'] | length == 1 + - groups[cluster_inventory_group.all] | length == 1 tags: - all diff --git a/roles/tools/tasks/postinstall_messages.yml b/roles/tools/tasks/postinstall_messages.yml index 0cbf101..d1bb56c 100644 --- a/roles/tools/tasks/postinstall_messages.yml +++ b/roles/tools/tasks/postinstall_messages.yml @@ -96,7 +96,7 @@ - to check pods which are not yet in Running status, run: "kubectl get --all-namespaces pods --no-headers | grep -v -w 'Running' " - browse your master (using fqdn) to see the dashboard: - http://{{groups["masters"][0]}} + http://{{groups[cluster_inventory_group.masters][0]}} or, ideally (depending what was defined in config) http://dashboard.{{custom.networking.dnsDomain}} (when var custom.networking.dnsDomain properly defined and set in your dns ) debug: var=msg diff --git a/roles/tools/tasks/reset.yml b/roles/tools/tasks/reset.yml index aaaf770..cc30828 100644 --- a/roles/tools/tasks/reset.yml +++ b/roles/tools/tasks/reset.yml @@ -18,7 +18,7 @@ - uninstall ignore_errors: true when: - - groups['masters'] | length > 1 + - groups[cluster_inventory_group.masters] | length > 1 - ( custom.networking.masterha_type | default('vip') ) == 'vip' # We had to remove it, as it blocks the flow. It also fetches docker.io images and in some setups there is no access to or fails due to limits on docker hub... 
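Note (illustration, not part of this patch): with the default mapping in group_vars/all/global.yaml, every rewritten lookup resolves to exactly what it did before, e.g. groups[cluster_inventory_group.masters] is the same list as groups['masters'], so existing inventories keep working unchanged. A minimal throwaway playbook (hypothetical file name check_groups.yml) to confirm what the mapped groups resolve to against a given inventory could look like:

# check_groups.yml - hypothetical helper, only to illustrate the lookup; not part of the playbook
- hosts: all
  gather_facts: false
  tasks:
    - name: show which inventory hosts each mapped group resolves to
      run_once: true
      debug:
        msg: "{{ item }} -> {{ groups[cluster_inventory_group[item]] | default([]) }}"
      loop: [all, masters, nodes, primary_master, secondary_masters]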
diff --git a/roles/tools/tasks/reset_drain.yml b/roles/tools/tasks/reset_drain.yml index d2065f6..d3a3c7d 100644 --- a/roles/tools/tasks/reset_drain.yml +++ b/roles/tools/tasks/reset_drain.yml @@ -32,7 +32,7 @@ ignore_errors: true #shell: "kubectl get nodes -o jsonpath='{.items[*].metadata.name}'" - #with_items: "{{ groups['nodes'] }}" + #with_items: "{{ groups[cluster_inventory_group.nodes] }}" - name: kubectl get nodes must be empty by now (if target was full cluster and not partial update) shell: "kubectl get nodes --no-headers | grep -v 'node-role.kubernetes.io/control-plane' | grep -v -w 'Ready' || true" diff --git a/site.yml b/site.yml index 837aadb..1606a21 100644 --- a/site.yml +++ b/site.yml @@ -80,9 +80,9 @@ tags: - node roles: - - { role: tools, task: reset, tags: [ 'reset', 'node_reset' ], when: "inventory_hostname not in groups['masters']" } - - { role: tools, task: weave_reset, tags: [ 'reset', 'node_reset', 'network_reset', 'weave_reset', 'weave' ], when: "inventory_hostname not in groups['masters']" } - - { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups['masters']" } + - { role: tools, task: reset, tags: [ 'reset', 'node_reset' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } + - { role: tools, task: weave_reset, tags: [ 'reset', 'node_reset', 'network_reset', 'weave_reset', 'weave' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } + - { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } ## master -> reset and install common part (for all masters - and sometimes etcd when colocated with masters) - hosts: masters @@ -109,10 +109,10 @@ - role: keepalived tags: [ 'master', 'install', 'master_install', 'ha', 'keepalived'] when: - - ( groups['masters'] | length ) > 1 + - ( groups[cluster_inventory_group.masters] | length ) > 1 - ( custom.networking.masterha_type | default('vip') ) == 'vip' -- hosts: primary-master +- hosts: primary-master name: primary-master (or master in general) - it applies to both ha and non-ha become: yes become_method: sudo @@ -147,7 +147,7 @@ - install - node_install roles: - - { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups['masters']" } + - { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" } ## node -> label nodes (even when master is also a node) - hosts: nodes diff --git a/templates/dashboard_profile1.j2 b/templates/dashboard_profile1.j2 index 942b642..ecbb3fd 100644 --- a/templates/dashboard_profile1.j2 +++ b/templates/dashboard_profile1.j2 @@ -4,8 +4,8 @@ ingress: enabled: true hosts: - 'dashboard.{{ custom.networking.dnsDomain }}' - - '{{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }}' - - '{{ groups["primary-master"][0] }}' + - '{{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }}' + - '{{ groups[cluster_inventory_group.primary_master][0] }}' metricsScraper: enabled: true image:
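For reference, a minimal sketch of an inventory in Ansible's YAML format (hostnames are placeholders, not part of this patch) using the group names that the default cluster_inventory_group mapping and the plays' hosts: lines expect:

# inventory.yml - hypothetical example
all:
  children:
    primary-master:
      hosts:
        master1.example.com:
    secondary-masters:
      hosts:
        master2.example.com:
        master3.example.com:
    masters:
      children:
        primary-master:
        secondary-masters:
    nodes:
      hosts:
        # a master may also be listed as a node; the "not in masters" guards above handle that
        master1.example.com:
        node1.example.com:
        node2.example.com:

Only the values of cluster_inventory_group are meant to be overridden; the keys (all, masters, nodes, primary_master, secondary_masters) are what the tasks reference.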