# Default values for zenko.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# By default, MongoDB, Redis-HA, Zenko-Queue, and Zenko-Quorum
# will use this value for their replica count. Typically, this
# is equivalent to the number of nodes in the Kubernetes cluster.
nodeCount: &nodeCount 3

ingress:
  enabled: false
  # Used to create an Ingress record.
  # This must match the 'cloudserver' 'endpoint', unless your client
  # supports different hostnames.
  hosts:
    - zenko.local
  max_body_size: 100m
  annotations:
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  tls:
    # Secrets must be manually created in the namespace.
    # - secretName: zenko-tls
    #   hosts:
    #     - zenko.example.com

global:
  orbit:
    enabled: true
    endpoint: https://api.zenko.io
  # When 'orbit.enabled' is 'true', these aren't used; please use
  # https://zenko.io to manage your deployment.
  locationConstraints: {}
  replicationEndpoints: []
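  # For reference, a minimal sketch of a manually declared location when
  # Orbit is disabled, assuming the CloudServer-style location schema (the
  # name 'us-east-1' and the field values are illustrative, not chart
  # defaults):
  # locationConstraints:
  #   us-east-1:
  #     type: file
  #     legacyAwsBehavior: true
  #     details: {}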
  s3Utils:
    image:
      repository: zenko/s3utils
      tag: "1.3"
      pullPolicy: IfNotPresent
  maintenance:
    # Enables retry CronJobs that attempt to retry CRR objects stuck in the
    # pending or failed state on the specified schedule. For Orbit-enabled
    # installs, Zenko must be registered and account credentials (secretKey
    # and accessKey) must be provisioned before enabling this maintenance
    # feature.
    enabled: false
    accessKey: ""
    secretKey: ""
    successfulJobsHistory: 1
    retryFailed:
      schedule: "*/10 * * * *"
    retryPending:
      schedule: "*/10 * * * *"
    debug:
      # Provisions a pod with utilities to help debug Zenko from within the
      # cluster. Credentials added here are applied to the pod's environment
      # variables and can be used for debugging, but they are not necessary
      # to instantiate the pod.
      enabled: false
      accessKey: ""
      secretKey: ""
    kafkaClient:
      image:
        repository: solsson/kafka
        tag: 0.11.0.0
        pullPolicy: IfNotPresent
  # Enables reporting CronJobs that are essential for displaying information
  # in Orbit, such as the total number of objects in the namespace.
  reporting:
    enabled: true
    successfulJobsHistory: 1
    countItems:
      schedule: "@hourly"

cloudserver:
  replicaCount: *nodeCount
  replicaFactor: 10
  mongodb:
    replicas: *nodeCount
  endpoint: zenko.local
  users: {}
  # accountName:
  #   access:
  #   secret:
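  # A filled-in sketch of the structure above (account name and keys are
  # placeholders, not provisioned credentials):
  # users:
  #   my-account:
  #     access: my-access-key
  #     secret: my-secret-key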

backbeat:
  replication:
    dataProcessor:
      replicaCount: *nodeCount
      # If replicaFactor is increased, make sure to increase the
      # number of partitions for the "backbeat-replication" topic
      # accordingly.
      replicaFactor: 1
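      # For example, assuming one Kafka consumer per processor instance:
      # replicaCount: 3 with replicaFactor: 2 yields 6 consumers, so the
      # "backbeat-replication" topic would need at least 6 partitions for
      # every consumer to be assigned work.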
    statusProcessor:
      replicaCount: *nodeCount
  lifecycle:
    bucketProcessor:
      replicaCount: *nodeCount
    objectProcessor:
      replicaCount: *nodeCount
  garbageCollector:
    consumer:
      replicaCount: *nodeCount
  ingestion:
    enabled: true
    consumer:
      replicaCount: *nodeCount
  mongodb:
    replicas: *nodeCount
mongodb:
replicas: *nodeCount
cosmos:
enabled: true
## Specify an existing storageClass to use
# storageClass:
rbac:
create: true
## If set to true creates role and role binding instead of
## default clusterrole and clusterrole binding
namespaced: false
operator:
image:
repository: zenko/cosmos-operator
tag: 0.4.3
pullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
scheduler:
image:
repository: zenko/cosmos-scheduler
tag: 0.4.2
pullPolicy: IfNotPresent
## A namespace to watch can be specified otherwise will default to the
## installed namespace
# namespace: default
schedule: "* */12 * * *"
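    # Note: "* */12 * * *" fires every minute during hours 00 and 12; to
    # run once every 12 hours instead, the schedule would be "0 */12 * * *".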
    resources:
      limits:
        cpu: 50m
        memory: 64Mi

zenko-nfs:
  enabled: false
  mongodb:
    replicas: *nodeCount

prometheus:
  rbac:
    create: true
  alertmanager:
    enabled: false
  kubeStateMetrics:
    enabled: false
  nodeExporter:
    enabled: false
  pushgateway:
    enabled: false
  server:
    replicaCount: 2
    affinity: |
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 5
            podAffinityTerm:
              topologyKey: "kubernetes.io/hostname"
              labelSelector:
                matchLabels:
                  app: {{ template "prometheus.name" . }}
                  release: {{ .Release.Name | quote }}
                  component: server
  ## The relabeling below adds a host_node label to all the Zenko pods
  ## to allow for metrics aggregation by node.
  serverFiles:
    prometheus.yml:
      scrape_configs:
        - job_name: 'zenko-pods'
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            - source_labels: [__meta_kubernetes_pod_node_name]
              action: replace
              target_label: host_node
            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
              action: keep
              regex: true
            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
              action: replace
              target_label: __metrics_path__
              regex: (.+)
            - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
              action: replace
              regex: ([^:]+)(?::\d+)?;(\d+)
              replacement: $1:$2
              target_label: __address__
            - action: labelmap
              regex: __meta_kubernetes_pod_label_(.+)
            - source_labels: [__meta_kubernetes_namespace]
              action: replace
              target_label: kubernetes_namespace
            - source_labels: [__meta_kubernetes_pod_name]
              action: replace
              target_label: kubernetes_pod_name
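  ## With the relabel_configs above, a pod opts into scraping via the
  ## standard prometheus.io annotations on its metadata, e.g. (port and
  ## path values are illustrative):
  ##   annotations:
  ##     prometheus.io/scrape: "true"
  ##     prometheus.io/port: "8000"
  ##     prometheus.io/path: "/metrics"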

mongodb-replicaset:
  image:
    tag: 3.6.8
  replicaSetName: rs0
  replicas: *nodeCount
  podDisruptionBudget:
    maxUnavailable: 1
  metrics:
    enabled: true
  securityContext:
    runAsUser: 1000
    fsGroup: 1000
    runAsNonRoot: true
  persistentVolume:
    size: 50Gi
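    # Note: this size applies per replica set member; with replicas: 3,
    # three 50Gi volumes (150Gi total) are provisioned, assuming the
    # upstream chart's one-PVC-per-member StatefulSet behavior.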

zenko-queue:
  ## An extensive list of configurables can be found here:
  ## https://github.com/kubernetes/charts/blob/master/incubator/kafka/values.yaml
  replicas: *nodeCount
  kafkaHeapOptions: "-Xmx6G -Xms1G"
  rbac:
    enabled: true
  configurationOverrides:
    "offsets.topic.replication.factor": 3 # replication factor for the offsets topic
    "auto.create.topics.enable": false # whether to auto-create topics on the server
    "min.insync.replicas": 2 # minimum number of replicas that must acknowledge a write
    "message.max.bytes": "5000000" # the largest record batch size allowed
  prometheus:
    jmx:
      enabled: true
    kafka:
      enabled: true
  topics:
    - name: backbeat-gc
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-ingestion
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-lifecycle-object-tasks
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-lifecycle-bucket-tasks
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-metrics
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-replication
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-replication-status
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-replication-failed
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-data-mover
      partitions: *nodeCount
      replicationFactor: 3
    - name: backbeat-sanitycheck
      partitions: 1
      replicationFactor: 3
  persistence:
    size: 20Gi

zenko-quorum:
  ## An extensive list of configurables can be found here:
  ## https://github.com/kubernetes/charts/blob/master/incubator/zookeeper/values.yaml
  replicaCount: *nodeCount
  exporters:
    jmx:
      enabled: true

redis-ha:
  replicas: *nodeCount
  redis:
    masterGroupName: zenko
    config:
      save: "60 1"
      stop-writes-on-bgsave-error: "yes"
      slave-serve-stale-data: "yes"
  sentinel:
    config:
      down-after-milliseconds: 5000
      failover-timeout: 60000
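      # i.e. Sentinel marks the master as down after 5 s without a valid
      # reply and allows roughly 60 s for a failover attempt before
      # retrying; 'save: "60 1"' above snapshots to disk when at least
      # 1 key has changed within 60 s.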

grafana:
  sidecar:
    image: zenko/grafana-sidecar:0.1
    datasources:
      enabled: true
      # Every ConfigMap with the following label will be used as a
      # datasource. By default, prometheus is set as a datasource under
      # zenko/templates/datasources.yaml.
      label: grafana-datasource
    dashboards:
      enabled: true
      # Likewise, every ConfigMap with the following label will be used as
      # a source for a dashboard.
      label: grafana-dashboard

zenko-queue-manager:
  zkHosts: "{{ .Release.Name }}-zenko-quorum-headless:2181"
  clusters:
    - name: "zenko-queue-cluster"
      tuning: {}
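
# A typical install overriding these defaults might look like the following
# (helm v2 syntax; the release name, values file, and chart path are
# placeholders):
#   helm install --name zenko -f my-values.yaml ./zenko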