From 7bcd9ee9576d62afe3c72b021c9062f95de0d6e0 Mon Sep 17 00:00:00 2001
From: Paulo Pires
Date: Tue, 21 Aug 2018 14:38:46 +0100
Subject: [PATCH] Bump Elasticsearch to v6.3.2

Signed-off-by: Paulo Pires
---
 README.md                      | 59 +++++++++++++++++-----------------
 es-data.yaml                   |  2 +-
 es-ingest.yaml                 |  2 +-
 es-master.yaml                 |  2 +-
 kibana.yaml                    |  2 +-
 stateful/es-data-stateful.yaml |  2 +-
 6 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/README.md b/README.md
index a929530..79c6bb5 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
 # kubernetes-elasticsearch-cluster
-Elasticsearch (6.3.0) cluster on top of Kubernetes made easy.
+Elasticsearch (6.3.2) cluster on top of Kubernetes made easy.
 
 ### Table of Contents
 
@@ -52,9 +52,8 @@ Given this, I'm going to demonstrate how to provision a production grade scenari
 
 ## Pre-requisites
 
-* Kubernetes 1.9.x (tested with v1.10.4 on top of [Vagrant + CoreOS](https://github.com/pires/kubernetes-vagrant-coreos-cluster)), thas's because curator is a CronJob object which comes from `batch/v2alpha1`, to enable it, just add
-  `--runtime-config=batch/v2alpha1=true` into your kube-apiserver options.
-* `kubectl` configured to access the cluster master API Server
+* Kubernetes 1.11.x (tested with v1.11.2 on top of [Vagrant + CoreOS](https://github.com/pires/kubernetes-vagrant-coreos-cluster)).
+* `kubectl` configured to access the Kubernetes API.
 
@@ -81,26 +80,27 @@ kubectl rollout status -f es-data.yaml
 ```
 
 Let's check if everything is working properly:
+
 ```shell
 kubectl get svc,deployment,pods -l component=elasticsearch
-NAME                              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
-service/elasticsearch             ClusterIP   10.100.32.137   <none>        9200/TCP   1h
-service/elasticsearch-discovery   ClusterIP   None            <none>        9300/TCP   1h
-service/elasticsearch-ingest      ClusterIP   10.100.31.141   <none>        9200/TCP   1h
+NAME                              TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
+service/elasticsearch             ClusterIP   10.100.243.196   <none>        9200/TCP   3m
+service/elasticsearch-discovery   ClusterIP   None             <none>        9300/TCP   3m
+service/elasticsearch-ingest     ClusterIP   10.100.76.74     <none>        9200/TCP   2m
 
 NAME                              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
-deployment.extensions/es-data     2         2         2            2           4m
-deployment.extensions/es-ingest   2         2         2            2           7m
-deployment.extensions/es-master   3         3         3            3           7m
-
-NAME                        READY     STATUS    RESTARTS   AGE
-pod/es-data-5c5969967-wb2b8     1/1       Running   0          4m
-pod/es-data-5c5969967-wrrxk     1/1       Running   0          4m
-pod/es-ingest-548b65475-6s7hg   1/1       Running   0          7m
-pod/es-ingest-548b65475-whvqx   1/1       Running   0          7m
-pod/es-master-879576496-dhnlp   1/1       Running   0          7m
-pod/es-master-879576496-jjlvf   1/1       Running   0          7m
-pod/es-master-879576496-sgwxf   1/1       Running   0          7m
+deployment.extensions/es-data     2         2         2            2           1m
+deployment.extensions/es-ingest   2         2         2            2           2m
+deployment.extensions/es-master   3         3         3            3           3m
+
+NAME                         READY     STATUS    RESTARTS   AGE
+pod/es-data-56f8ff8c97-642bq     1/1       Running   0          1m
+pod/es-data-56f8ff8c97-h6hpc     1/1       Running   0          1m
+pod/es-ingest-6ddd5fc689-b4s94   1/1       Running   0          2m
+pod/es-ingest-6ddd5fc689-d8rtj   1/1       Running   0          2m
+pod/es-master-68bf8f86c4-bsfrx   1/1       Running   0          3m
+pod/es-master-68bf8f86c4-g8nph   1/1       Running   0          3m
+pod/es-master-68bf8f86c4-q5khn   1/1       Running   0          3m
 ```
 
 As we can assert, the cluster seems to be up and running. Easy, wasn't it?
@@ -113,29 +113,29 @@
 ```shell
 kubectl get svc elasticsearch
-NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
-elasticsearch   ClusterIP   10.100.32.137   <none>        9200/TCP   1h
+NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
+elasticsearch   ClusterIP   10.100.243.196   <none>        9200/TCP   3m
 ```
 
 From any host on the Kubernetes cluster (that's running `kube-proxy` or similar), run:
 
 ```shell
-curl http://10.100.32.137:9200
+curl http://10.100.243.196:9200
 ```
 
 One should see something similar to the following:
 
 ```json
 {
-  "name" : "es-data-5c5969967-wb2b8",
+  "name" : "es-data-56f8ff8c97-642bq",
   "cluster_name" : "myesdb",
-  "cluster_uuid" : "qSps-b9dRI2ngGHBguJ44Q",
+  "cluster_uuid" : "RkRkTl26TDOE7o0FhCcW_g",
   "version" : {
-    "number" : "6.3.0",
+    "number" : "6.3.2",
     "build_flavor" : "default",
     "build_type" : "tar",
-    "build_hash" : "424e937",
-    "build_date" : "2018-06-11T23:38:03.357887Z",
+    "build_hash" : "053779d",
+    "build_date" : "2018-07-20T05:20:23.451332Z",
     "build_snapshot" : false,
     "lucene_version" : "7.3.1",
     "minimum_wire_compatibility_version" : "5.6.0",
@@ -148,7 +148,7 @@ One should see something similar to the following:
 Or if one wants to see cluster information:
 
 ```shell
-curl http://10.100.32.137:9200/_cluster/health?pretty
+curl http://10.100.243.196:9200/_cluster/health?pretty
 ```
 
 One should see something similar to the following:
@@ -184,6 +184,7 @@ It is then **highly recommended**, in the context of the solution described in t
 in order to guarantee that two data pods will never run on the same node.
 
 Here's an example:
+
 ```yaml
 spec:
   affinity:
diff --git a/es-data.yaml b/es-data.yaml
index 3d60e3f..fcb5165 100644
--- a/es-data.yaml
+++ b/es-data.yaml
@@ -24,7 +24,7 @@ spec:
           privileged: true
       containers:
       - name: es-data
-        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.0
+        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.2
         env:
         - name: NAMESPACE
           valueFrom:
diff --git a/es-ingest.yaml b/es-ingest.yaml
index fff8bf2..3c4808a 100644
--- a/es-ingest.yaml
+++ b/es-ingest.yaml
@@ -24,7 +24,7 @@ spec:
           privileged: true
       containers:
       - name: es-ingest
-        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.0
+        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.2
         env:
         - name: NAMESPACE
           valueFrom:
diff --git a/es-master.yaml b/es-master.yaml
index 8ae11ee..73faa6c 100644
--- a/es-master.yaml
+++ b/es-master.yaml
@@ -24,7 +24,7 @@ spec:
           privileged: true
      containers:
       - name: es-master
-        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.0
+        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.2
         env:
         - name: NAMESPACE
           valueFrom:
diff --git a/kibana.yaml b/kibana.yaml
index 1faba6e..2d930e5 100644
--- a/kibana.yaml
+++ b/kibana.yaml
@@ -16,7 +16,7 @@ spec:
     spec:
       containers:
       - name: kibana
-        image: docker.elastic.co/kibana/kibana-oss:6.3.0
+        image: docker.elastic.co/kibana/kibana-oss:6.3.2
         env:
         - name: CLUSTER_NAME
           value: myesdb
diff --git a/stateful/es-data-stateful.yaml b/stateful/es-data-stateful.yaml
index 8120370..40e4fdf 100644
--- a/stateful/es-data-stateful.yaml
+++ b/stateful/es-data-stateful.yaml
@@ -25,7 +25,7 @@ spec:
           privileged: true
       containers:
       - name: es-data
-        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.0
+        image: quay.io/pires/docker-elasticsearch-kubernetes:6.3.2
         env:
         - name: NAMESPACE
           valueFrom:
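
Reviewer note: the README hunk above introduces a pod anti-affinity example that the diff context cuts off at `spec: affinity:`. For reference, here is a minimal sketch of how that stanza typically ends up looking. It assumes the `component: elasticsearch` / `role: data` labels used elsewhere in this repo's data manifests; `kubernetes.io/hostname` is the standard node label used as the topology key.

```yaml
spec:
  affinity:
    podAntiAffinity:
      # Hard requirement: the scheduler must never place two pods
      # matching these labels on the same node (keyed by hostname).
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            component: elasticsearch
            role: data
        topologyKey: kubernetes.io/hostname
```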
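Since this patch only bumps image tags, a quick smoke test after `kubectl apply` is to wait for the rollouts and confirm the version the cluster reports. A sketch, reusing the `elasticsearch` service ClusterIP from the README output above (yours will differ):

```shell
# Wait for each bumped Deployment to finish rolling out the 6.3.2 image.
kubectl rollout status deployment/es-master
kubectl rollout status deployment/es-ingest
kubectl rollout status deployment/es-data

# The root endpoint should now report "number" : "6.3.2".
curl -s http://10.100.243.196:9200 | grep '"number"'
```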