---
# yaml-language-server: $schema=https://taskfile.dev/schema.json

version: '3'

dotenv: [ .env.sops.common, .env.sops.staging ]
# dotenv: [ .env.sops.common, .env.sops.production ]

vars:
  REPOSITORY_DIR:
    sh: git rev-parse --show-toplevel
  _OUTPUT_ROOT_DIR: '{{ .OUTPUT_ROOT_DIR | default .REPOSITORY_DIR }}'
  # Strips any existing '_out' suffix before re-appending '/_out', so the output root always ends in a single '_out' segment
  _NORMALIZED_OUTPUT_ROOT_DIR: '{{ ._OUTPUT_ROOT_DIR | replace "_out/" "" | replace "_out" "" }}/_out'
  _OUTPUT_ENV_DIR: '{{ ._NORMALIZED_OUTPUT_ROOT_DIR }}/{{ .ENVIRONMENT_NAME }}'
  _NORMALIZED_OUTPUT_ENV_DIR: '{{ ._OUTPUT_ENV_DIR | lower }}'
  OUTPUT_DIR: '{{ ._NORMALIZED_OUTPUT_ENV_DIR }}'
  REPOSITORY_REMOTE_ORIGIN_NAME:
    sh: basename -s .git `git config --get remote.origin.url`
  PRIMARY_NODE_HOSTNAME: k8s-cp01

includes:
  _core: .taskfiles/_Core.yaml
  azure:
    taskfile: .taskfiles/AzureTasks.yaml
    aliases: [ az ]  # e.g. `task az:<task-name>` works as shorthand for `task azure:<task-name>`
  cilium:
    taskfile: .taskfiles/CiliumTasks.yaml
    aliases: [ cil ]
  cloudflareApiTasks:
    taskfile: .taskfiles/CloudflareApiTasks.yaml
    aliases: [ cf ]
  csidrivers: .taskfiles/CsiDrivers.yaml
  docker: .taskfiles/DockerTasks.yaml
  flux: .taskfiles/FluxTasks.yaml
  kubectl: .taskfiles/Kubectl.yaml
  misc: .taskfiles/MiscTasks.yaml
  # openssl: .taskfiles/OpenSslTasks.yaml
  pki: .taskfiles/PkiTasks.yaml
  precommit:
    taskfile: .taskfiles/PreCommitTasks.yaml
    aliases: [ pc ]
  # sidero: .taskfiles/SideroTasks.yaml
  sops: .taskfiles/SopsTasks.yaml
  talos: .taskfiles/TalosTasks.yaml
  terraform:
    taskfile: .taskfiles/TerraformTasks.yaml
    aliases: [ tf ]
  tetragon: .taskfiles/TetragonTasks.yaml
  tooling: .taskfiles/ToolingTasks.yaml

silent: true
tasks:
  default:
    silent: true
    cmds: [ task -l ]

  prepare-environment-pki:
    desc: Example that generates all of the PKI files from scratch
    cmds:
      - task: _core:display-banner
      - task: _core:message
        vars: { MESSAGE: 'Generating PKI from scratch for: {{.ENVIRONMENT_NAME}}' }
      - task: _core:message
        vars: { MESSAGE: 'Outputting to folder: {{.ENV_OUTPUT_DIR}}\n------------------------------------------------------' }
      ### Root CA (should be stored OFFLINE somewhere safe... like in a... safe)
      #
      # Note: You will probably need the public portion of this later to build a bundle for cert-chain trust purposes
      # Note: You may need to adjust the Root CA "pathlen" value if you attempt to go deeper than two levels of intermediaries
      - task: pki:generate-root-ca
        vars: { OUTPUT_FILENAME: my_staging_root-ca }
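      # A possible sketch of building that trust bundle (hypothetical filenames; adjust to whatever the pki tasks actually emit):
      #   cat azure/azure_intermediate-ca.crt my_staging_root-ca.crt > azure/azure_ca-bundle.crt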
      ### Azure related
      - task: pki:generate-intermediate-ca
        vars: { OUTPUT_FILENAME: azure/azure_intermediate-ca, INTERMEDIARY_NAME: Azure }
      - task: pki:sign-intermediate-ca
        vars: { SIGNING_KEY_FILENAME: my_staging_root-ca, INTERMEDIATE_CA_FILENAME: azure/azure_intermediate-ca }
      # Azure Tenant Provisioner SP for Terraform
      - task: pki:generate-client-certificate
        vars: { SIGNING_KEY_FILENAME: azure/azure_intermediate-ca, CERT_CN: terraform.provisioner, OUTPUT_FILENAME: azure/terraform.provisioner }
      ### Kubernetes required Intermediary CAs and Certs
      #
      # See: https://kubernetes.io/docs/setup/best-practices/certificates/
      - task: pki:generate-intermediate-ca
        vars: { OUTPUT_FILENAME: kubernetes/kubernetes-intermediate, INTERMEDIARY_NAME: Kubernetes }
      - task: pki:sign-intermediate-ca
        vars: { SIGNING_KEY_FILENAME: my_staging_root-ca, INTERMEDIATE_CA_FILENAME: kubernetes/kubernetes-intermediate }
      - task: pki:generate-kubernetes-intermediate-ca
        vars: { OUTPUT_FILENAME: kubernetes/ca, CERT_CN: kubernetes-ca, SIGNING_KEY_FILENAME: kubernetes/kubernetes-intermediate }
      - task: pki:sign-intermediate-ca
        vars: { SIGNING_KEY_FILENAME: kubernetes/kubernetes-intermediate, INTERMEDIATE_CA_FILENAME: kubernetes/ca }
      - task: pki:generate-kubernetes-intermediate-ca
        vars: { OUTPUT_FILENAME: kubernetes/etcd/ca, CERT_CN: etcd-ca, SIGNING_KEY_FILENAME: kubernetes/kubernetes-intermediate }
      - task: pki:sign-intermediate-ca
        vars: { SIGNING_KEY_FILENAME: kubernetes/kubernetes-intermediate, INTERMEDIATE_CA_FILENAME: kubernetes/etcd/ca }
      - task: pki:generate-kubernetes-intermediate-ca
        vars: { OUTPUT_FILENAME: kubernetes/front-proxy-ca, CERT_CN: kubernetes-front-proxy-ca, SIGNING_KEY_FILENAME: kubernetes/kubernetes-intermediate }
      - task: pki:sign-intermediate-ca
        vars: { SIGNING_KEY_FILENAME: kubernetes/kubernetes-intermediate, INTERMEDIATE_CA_FILENAME: kubernetes/front-proxy-ca }
      - task: pki:generate-k8s-service-account-key
        vars: { OUTPUT_FILENAME: kubernetes/sa }
      - task: pki:prepare-k8s-directory
      ### OPNsense related
      - task: pki:generate-intermediate-ca
        vars: { OUTPUT_FILENAME: opnsense/opnsense-intermediate, INTERMEDIARY_NAME: OPNsense }
      - task: pki:sign-intermediate-ca
        vars: { SIGNING_KEY_FILENAME: my_staging_root-ca, INTERMEDIATE_CA_FILENAME: opnsense/opnsense-intermediate }
      - task: pki:generate-server-certificate
        vars: { SIGNING_KEY_FILENAME: opnsense/opnsense-intermediate, CERT_CN: opnsense01.home.arpa, SERVER_NAME: opnsense01, SERVER_IP: '{{.OPNSENSE01_MANAGEMENT_IP}}', OUTPUT_FILENAME: opnsense/opnsense01.home.arpa }
      - task: pki:generate-server-certificate
        vars: { SIGNING_KEY_FILENAME: opnsense/opnsense-intermediate, CERT_CN: opnsense02.home.arpa, SERVER_NAME: opnsense02, SERVER_IP: '{{.OPNSENSE02_MANAGEMENT_IP}}', OUTPUT_FILENAME: opnsense/opnsense02.home.arpa }
      ### Client certificates (signed directly by the Root CA)
      - task: pki:generate-client-certificate
        vars: { SIGNING_KEY_FILENAME: my_staging_root-ca, CERT_CN: my_client_certificate, OUTPUT_FILENAME: clients/my_client_certificate }
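      # One way to sanity-check a generated chain afterwards (hypothetical .crt filenames; adjust to what the pki tasks emit):
      #   openssl verify -CAfile my_staging_root-ca.crt -untrusted azure/azure_intermediate-ca.crt azure/terraform.provisioner.crt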
  prepare-environment-configs:
    desc: Generates the Talos secrets bundle and machine configs from the generated PKI
    cmds:
      - task: talos:generate-secrets-bundle-from-pki
        vars: { K8S_PKI_DIR: '{{.ENV_OUTPUT_DIR}}/pki/kubernetes' }
      - task: talos:generate-machine-configs
        vars: { ARCH: amd64 }
      - task: talos:generate-machine-configs
        vars: { ARCH: arm64 }
      - task: cilium:prepare-helm-manifest

  # Note: Nothing here handles Windows DSC provisioning prior to running this task.
  # You must run the Windows DSC scripts yourself in an elevated legacy Windows PowerShell or PowerShell ISE session.
  config-hyperv-cluster:
    desc: Configures a set of pre-existing Hyper-V VMs
    cmds:
      - task: _core:display-banner
      - task: _core:message
        vars: { MESSAGE: Configuring Hyper-V based Talos Kubernetes cluster... }
      - task: _core:important-message
        vars: { MESSAGE: Please turn on and boot the Control Plane nodes }
      # Note: These apply steps could probably be parallelized, since the nodes always start up in a staggered fashion
      # depending on how the hypervisor is feeling; a commented sketch of one approach follows the three control-plane steps below.
      - task: talos:apply-machine-template
        vars: { NODE_NAME: '{{.PRIMARY_NODE_HOSTNAME}}', NODE_TYPE: controlplane }
      - task: talos:apply-machine-template
        vars: { NODE_NAME: k8s-cp02, NODE_TYPE: controlplane }
      - task: talos:apply-machine-template
        vars: { NODE_NAME: k8s-cp03, NODE_TYPE: controlplane }
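      # A possible (untested) sketch for parallelizing the control-plane applies: go-task runs `deps` concurrently,
      # so a hypothetical helper task like the one below could replace the three sequential steps above.
      #
      #   apply-controlplane-templates:
      #     deps:
      #       - task: talos:apply-machine-template
      #         vars: { NODE_NAME: '{{.PRIMARY_NODE_HOSTNAME}}', NODE_TYPE: controlplane }
      #       - task: talos:apply-machine-template
      #         vars: { NODE_NAME: k8s-cp02, NODE_TYPE: controlplane }
      #       - task: talos:apply-machine-template
      #         vars: { NODE_NAME: k8s-cp03, NODE_TYPE: controlplane }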
      - task: talos:bootstrap-etcd
        vars: { NODE_NAME: '{{.PRIMARY_NODE_HOSTNAME}}' }
      - task: talos:get-kubeconfig
        vars: { NODE_NAME: '{{.PRIMARY_NODE_HOSTNAME}}' }
      - task: cilium:apply-helm-manifest
        vars: { NODE_NAME: '{{.PRIMARY_NODE_HOSTNAME}}' }
      - task: _core:important-message
        vars: { MESSAGE: Please turn on and boot the Data Plane nodes }
      - task: talos:apply-machine-template
        vars: { NODE_NAME: k8s-wk01, NODE_TYPE: worker }
      - task: talos:apply-machine-template
        vars: { NODE_NAME: k8s-wk02, NODE_TYPE: worker }
      - task: _core:message
        vars: { MESSAGE: Verifying worker nodes are in 'Ready' status... }
      - ./scripts/kubernetes/wait-for-node-ready.sh k8s-wk01
      - ./scripts/kubernetes/wait-for-node-ready.sh k8s-wk02
      - task: _core:message
        vars: { MESSAGE: Labeling worker node(s) with 'node-role.kubernetes.io/worker'... }
      - kubectl label nodes k8s-wk01 node-role.kubernetes.io/worker=
      - kubectl label nodes k8s-wk02 node-role.kubernetes.io/worker=
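      # To spot-check the labels afterwards, one option is: kubectl get nodes --show-labels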