From 368751ee39bb6fb3f39766df7d84459a23fa22ba Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Tue, 30 Apr 2019 18:28:33 +0800 Subject: [PATCH 01/77] First cut of ci with kind --- .travis.yml | 11 ++-- ci/install-kind.sh | 80 ++++++++++++++++++++++++++ ci/{install.sh => install-minikube.sh} | 0 ci/kind-1.14-default.env | 11 ++++ ci/test.sh | 4 +- ci/vagrant-run-kind.sh | 13 +++++ ci/vagrant-run.sh | 2 +- ci/xenial-setup.sh | 1 + 8 files changed, 114 insertions(+), 8 deletions(-) create mode 100755 ci/install-kind.sh rename ci/{install.sh => install-minikube.sh} (100%) create mode 100644 ci/kind-1.14-default.env create mode 100755 ci/vagrant-run-kind.sh diff --git a/.travis.yml b/.travis.yml index 6dd446943a..009948ce99 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,8 +8,8 @@ services: - docker install: - pip3 install --no-cache-dir -r dev-requirements.txt - - . "ci/minikube-${SCENARIO}.env" - - ./ci/install.sh + - . "ci/${RUNNER}-${SCENARIO}.env" + - ./ci/install-${RUNNER}.sh script: - ./ci/travis-script.sh @@ -17,9 +17,10 @@ env: # Different scenarios described in # /ci/minikube-${SCENARIO}.env matrix: - - SCENARIO=1.13-default - - SCENARIO=1.12-netpol - - SCENARIO=1.11-default + - SCENARIO=1.13-default RUNNER=minikube + - SCENARIO=1.12-netpol RUNNER=minikube + - SCENARIO=1.11-default RUNNER=minikube + - SCENARIO=1.14-default RUNNER=kind global: - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= - secure: BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= diff --git a/ci/install-kind.sh b/ci/install-kind.sh new file mode 100755 index 0000000000..ab8897b7af --- /dev/null +++ b/ci/install-kind.sh @@ -0,0 +1,80 @@ +#!/bin/bash +set -ex + +mkdir -p bin + +# nsenter is included on xenial + +# install socat (required by helm) +sudo apt-get update && sudo apt-get install -y socat + +# install kubectl, minikube +# based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube +if ! [ -f "bin/kubectl" ]; then + echo "installing kubectl" + curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl + chmod +x kubectl + mv kubectl bin/ +fi + +if ! 
[ -f "bin/kind" ]; then + echo "installing kind" + curl -Lo kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64 + chmod +x kind + mv kind bin/ +fi + +# FIXME: Workaround missing crictl on K8s 1.11 only +# - Issue: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1123 +# - CI fail: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/485093909 +if [ ! -z "${CRICTL_VERSION}" ]; then + echo "installing crictl" + if ! [ -f bin/crictl-${CRICTL_VERSION} ]; then + curl -sSLo bin/crictl-${CRICTL_VERSION}.tar.gz https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz + tar --extract --file bin/crictl-${CRICTL_VERSION}.tar.gz --directory bin + rm bin/crictl-${CRICTL_VERSION}.tar.gz + mv bin/crictl bin/crictl-${CRICTL_VERSION} + fi + cp bin/crictl-${CRICTL_VERSION} bin/crictl + # minikube is run with sudo so the modified PATH is lost + sudo ln -s "${PWD}/bin/crictl-${CRICTL_VERSION}" /usr/bin/crictl +fi + + +echo "installing kubeval" +if ! [ -f bin/kubeval-${KUBEVAL_VERSION} ]; then + curl -sSLo bin/kubeval-${KUBEVAL_VERSION}.tar.gz https://github.com/garethr/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz + tar --extract --file bin/kubeval-${KUBEVAL_VERSION}.tar.gz --directory bin + rm bin/kubeval-${KUBEVAL_VERSION}.tar.gz + mv bin/kubeval bin/kubeval-${KUBEVAL_VERSION} +fi +cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval + +echo "starting minikube with RBAC" +CHANGE_MINIKUBE_NONE_USER=true $PWD/bin/kind create cluster --image kindest/node:v${KUBE_VERSION} +export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" + +echo "waiting for kubernetes" +JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' +until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do + sleep 1 +done +kubectl get nodes + +echo "installing helm" +curl -ssL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ + | tar -xz -C bin --strip-components 1 linux-amd64/helm +chmod +x bin/helm + +kubectl --namespace kube-system create sa tiller +kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller +helm init --service-account tiller + + +echo "waiting for tiller" +kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy + +echo "installing git-crypt" +curl -L https://github.com/minrk/git-crypt-bin/releases/download/0.5.0/git-crypt > bin/git-crypt +echo "46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b bin/git-crypt" | shasum -a 256 -c - +chmod +x bin/git-crypt diff --git a/ci/install.sh b/ci/install-minikube.sh similarity index 100% rename from ci/install.sh rename to ci/install-minikube.sh diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env new file mode 100644 index 0000000000..c97d83be3e --- /dev/null +++ b/ci/kind-1.14-default.env @@ -0,0 +1,11 @@ +export KUBE_VERSION=1.14.0 +export KIND_VERSION=0.2.1 +export HELM_VERSION=2.12.3 +export KUBEVAL_VERSION=0.7.3 +export PATH="$PWD/bin:$PATH" + +export Z2JH_HELM_ARGS="-f minikube-config.yaml" +export DISABLE_TEST_NETPOL=1 +export INSTALL_CALICO=0 + +export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/test.sh b/ci/test.sh index a8ab0f78df..f0fac03499 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -4,7 +4,7 @@ set -eux # Is there a standard interface name? 
for iface in eth0 ens4 enp0s3; do - IP=$(ifconfig $iface | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'); + IP=$(/sbin/ifconfig $iface | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'); if [ -n "$IP" ]; then echo "IP: $IP" break @@ -12,7 +12,7 @@ for iface in eth0 ens4 enp0s3; do done if [ -z "$IP" ]; then echo "Failed to get IP, current interfaces:" - ifconfig -a + /sbin/ifconfig -a exit 2 fi diff --git a/ci/vagrant-run-kind.sh b/ci/vagrant-run-kind.sh new file mode 100755 index 0000000000..4a5a5f5f59 --- /dev/null +++ b/ci/vagrant-run-kind.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# Run this inside vagrant to test the travis scripts + +set -eux +export SCENARIO=1.14 +export TRAVIS_BRANCH=master +export TRAVIS_PULL_REQUEST=true +export TRAVIS_COMMIT_RANGE=`git rev-parse --short origin/master`..`git rev-parse --short HEAD` + +pip3 install --no-cache-dir -r dev-requirements.txt +. ./ci/kind-${SCENARIO}.env +./ci/install-kind.sh +./ci/travis-script.sh diff --git a/ci/vagrant-run.sh b/ci/vagrant-run.sh index 9eb6226042..fed4d2f91a 100755 --- a/ci/vagrant-run.sh +++ b/ci/vagrant-run.sh @@ -9,5 +9,5 @@ export TRAVIS_COMMIT_RANGE=`git rev-parse --short origin/master`..`git rev-parse pip3 install --no-cache-dir -r dev-requirements.txt . ./ci/minikube-${SCENARIO}.env -./ci/install.sh +./ci/install-minikube.sh ./ci/travis-script.sh diff --git a/ci/xenial-setup.sh b/ci/xenial-setup.sh index 1a75d74595..a28ff44692 100755 --- a/ci/xenial-setup.sh +++ b/ci/xenial-setup.sh @@ -13,6 +13,7 @@ curl -O https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/ # dpkg won't install dependencies dpkg -i $DOCKER_DEB || apt-get install -f -y docker info +usermod -G docker vagrant install -o vagrant -g vagrant -d /home/vagrant/bin From 09d566e7283b8f5cab6b5774991ed27754b594a0 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Tue, 30 Apr 2019 20:24:52 +0800 Subject: [PATCH 02/77] allow TEST_URL to be kubectl forwarded port to accommodate kind --- ci/test.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/ci/test.sh b/ci/test.sh index f0fac03499..c6386c533b 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -17,9 +17,16 @@ if [ -z "$IP" ]; then fi TEST_NAMESPACE=jupyterhub-test -TEST_URL=http://$IP:31212 -helm install --name jupyterhub-test --namespace $TEST_NAMESPACE ./jupyterhub/ $Z2JH_HELM_ARGS +helm install --wait --name jupyterhub-test --namespace $TEST_NAMESPACE ./jupyterhub/ $Z2JH_HELM_ARGS + +if [ "$RUNNER" = "kind" ]; then + kubectl port-forward -n $TEST_NAMESPACE svc/proxy-public 8080:80 & + TEST_URL=http://127.0.0.1:8080 + export HUB_API_URL=http://127.0.0.1:8080/hub/api +else + TEST_URL=http://$IP:31212 +fi echo "waiting for servers to become responsive" until curl --fail -s $TEST_URL/hub/api; do From 39fbeddf66ff53ba0d00a966cdb9fff5498c71f8 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Tue, 30 Apr 2019 21:21:43 +0800 Subject: [PATCH 03/77] get correct KUBECONFIG during tests --- ci/install-kind.sh | 23 +++-------------------- ci/test.sh | 4 ++++ 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/ci/install-kind.sh b/ci/install-kind.sh index ab8897b7af..1581f65033 100755 --- a/ci/install-kind.sh +++ b/ci/install-kind.sh @@ -8,7 +8,7 @@ mkdir -p bin # install socat (required by helm) sudo apt-get update && sudo apt-get install -y socat -# install kubectl, minikube +# install kubectl, kind # based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube if ! 
[ -f "bin/kubectl" ]; then echo "installing kubectl" @@ -24,23 +24,6 @@ if ! [ -f "bin/kind" ]; then mv kind bin/ fi -# FIXME: Workaround missing crictl on K8s 1.11 only -# - Issue: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1123 -# - CI fail: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/485093909 -if [ ! -z "${CRICTL_VERSION}" ]; then - echo "installing crictl" - if ! [ -f bin/crictl-${CRICTL_VERSION} ]; then - curl -sSLo bin/crictl-${CRICTL_VERSION}.tar.gz https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz - tar --extract --file bin/crictl-${CRICTL_VERSION}.tar.gz --directory bin - rm bin/crictl-${CRICTL_VERSION}.tar.gz - mv bin/crictl bin/crictl-${CRICTL_VERSION} - fi - cp bin/crictl-${CRICTL_VERSION} bin/crictl - # minikube is run with sudo so the modified PATH is lost - sudo ln -s "${PWD}/bin/crictl-${CRICTL_VERSION}" /usr/bin/crictl -fi - - echo "installing kubeval" if ! [ -f bin/kubeval-${KUBEVAL_VERSION} ]; then curl -sSLo bin/kubeval-${KUBEVAL_VERSION}.tar.gz https://github.com/garethr/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz @@ -50,8 +33,8 @@ if ! [ -f bin/kubeval-${KUBEVAL_VERSION} ]; then fi cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval -echo "starting minikube with RBAC" -CHANGE_MINIKUBE_NONE_USER=true $PWD/bin/kind create cluster --image kindest/node:v${KUBE_VERSION} +echo "starting cluster with kind" +$PWD/bin/kind create cluster --image kindest/node:v${KUBE_VERSION} export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" echo "waiting for kubernetes" diff --git a/ci/test.sh b/ci/test.sh index c6386c533b..240cedddf5 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -18,6 +18,10 @@ fi TEST_NAMESPACE=jupyterhub-test +if [ "$RUNNER" = "kind" ]; then + export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" +fi + helm install --wait --name jupyterhub-test --namespace $TEST_NAMESPACE ./jupyterhub/ $Z2JH_HELM_ARGS if [ "$RUNNER" = "kind" ]; then From 71b1bddbb62b007b6332ab14a30fcaeebc211191 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Mon, 6 May 2019 15:40:44 +0800 Subject: [PATCH 04/77] only try to get IP for minikube --- ci/test.sh | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/ci/test.sh b/ci/test.sh index 240cedddf5..7590a81396 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -2,24 +2,25 @@ set -eux -# Is there a standard interface name? -for iface in eth0 ens4 enp0s3; do - IP=$(/sbin/ifconfig $iface | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'); - if [ -n "$IP" ]; then - echo "IP: $IP" - break - fi -done -if [ -z "$IP" ]; then - echo "Failed to get IP, current interfaces:" - /sbin/ifconfig -a - exit 2 -fi TEST_NAMESPACE=jupyterhub-test if [ "$RUNNER" = "kind" ]; then export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" +else + # Is there a standard interface name? 
+ for iface in eth0 ens4 enp0s3; do + IP=$(/sbin/ifconfig $iface | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'); + if [ -n "$IP" ]; then + echo "IP: $IP" + break + fi + done + if [ -z "$IP" ]; then + echo "Failed to get IP, current interfaces:" + /sbin/ifconfig -a + exit 2 + fi fi helm install --wait --name jupyterhub-test --namespace $TEST_NAMESPACE ./jupyterhub/ $Z2JH_HELM_ARGS From 484c1ea929e35c5416ea9bf38699f4cc12ae79ce Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Fri, 17 May 2019 15:14:30 +0800 Subject: [PATCH 05/77] Update kind to 0.3.0 --- ci/install-kind.sh | 2 +- ci/kind-1.14-default.env | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/install-kind.sh b/ci/install-kind.sh index 1581f65033..e8c04f8521 100755 --- a/ci/install-kind.sh +++ b/ci/install-kind.sh @@ -19,7 +19,7 @@ fi if ! [ -f "bin/kind" ]; then echo "installing kind" - curl -Lo kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64 + curl -Lo kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64 chmod +x kind mv kind bin/ fi diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env index c97d83be3e..27d7b4311f 100644 --- a/ci/kind-1.14-default.env +++ b/ci/kind-1.14-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.14.0 -export KIND_VERSION=0.2.1 +export KUBE_VERSION=1.14.1 +export KIND_VERSION=0.3.0 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" From 1f1cbedb3a200c649c76f2a6d087fc9b4a466c99 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Thu, 23 May 2019 14:47:18 +0800 Subject: [PATCH 06/77] Enable kind CI for 1.12 and 1.13 --- .travis.yml | 2 ++ ci/kind-1.12-default.env | 11 +++++++++++ ci/kind-1.13-default.env | 11 +++++++++++ ci/kind-1.14-default.env | 2 +- 4 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 ci/kind-1.12-default.env create mode 100644 ci/kind-1.13-default.env diff --git a/.travis.yml b/.travis.yml index 009948ce99..04d793fd89 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,6 +21,8 @@ env: - SCENARIO=1.12-netpol RUNNER=minikube - SCENARIO=1.11-default RUNNER=minikube - SCENARIO=1.14-default RUNNER=kind + - SCENARIO=1.13-default RUNNER=kind + - SCENARIO=1.12-default RUNNER=kind global: - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= - secure: 
BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= diff --git a/ci/kind-1.12-default.env b/ci/kind-1.12-default.env new file mode 100644 index 0000000000..a433e351a6 --- /dev/null +++ b/ci/kind-1.12-default.env @@ -0,0 +1,11 @@ +export KUBE_VERSION=1.12.8 +export KIND_VERSION=0.3.0 +export HELM_VERSION=2.12.3 +export KUBEVAL_VERSION=0.7.3 +export PATH="$PWD/bin:$PATH" + +export Z2JH_HELM_ARGS="-f minikube-config.yaml" +export DISABLE_TEST_NETPOL=1 +export INSTALL_CALICO=0 + +export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.13-default.env b/ci/kind-1.13-default.env new file mode 100644 index 0000000000..6afc6f156e --- /dev/null +++ b/ci/kind-1.13-default.env @@ -0,0 +1,11 @@ +export KUBE_VERSION=1.13.6 +export KIND_VERSION=0.3.0 +export HELM_VERSION=2.12.3 +export KUBEVAL_VERSION=0.7.3 +export PATH="$PWD/bin:$PATH" + +export Z2JH_HELM_ARGS="-f minikube-config.yaml" +export DISABLE_TEST_NETPOL=1 +export INSTALL_CALICO=0 + +export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env index 27d7b4311f..a2f30df0c2 100644 --- a/ci/kind-1.14-default.env +++ b/ci/kind-1.14-default.env @@ -1,4 +1,4 @@ -export KUBE_VERSION=1.14.1 +export KUBE_VERSION=1.14.2 export KIND_VERSION=0.3.0 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 From eca8b54968b428550bafe0145f74fe0884c9b090 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Thu, 4 Jul 2019 23:22:43 +0800 Subject: [PATCH 07/77] bump kind to 0.4.0 and test kubernetes 1.15 --- ci/kind-1.12-default.env | 4 ++-- ci/kind-1.13-default.env | 4 ++-- ci/kind-1.14-default.env | 4 ++-- ci/kind-1.15-default.env | 11 +++++++++++ 4 files changed, 17 insertions(+), 6 deletions(-) create mode 100644 ci/kind-1.15-default.env diff --git a/ci/kind-1.12-default.env b/ci/kind-1.12-default.env index a433e351a6..c51aef7602 100644 --- a/ci/kind-1.12-default.env +++ b/ci/kind-1.12-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.12.8 -export KIND_VERSION=0.3.0 +export KUBE_VERSION=1.12.9 +export KIND_VERSION=0.4.0 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" diff --git a/ci/kind-1.13-default.env b/ci/kind-1.13-default.env index 6afc6f156e..8c263e7f8e 100644 --- a/ci/kind-1.13-default.env +++ b/ci/kind-1.13-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.13.6 -export KIND_VERSION=0.3.0 +export KUBE_VERSION=1.13.7 +export KIND_VERSION=0.4.0 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env index a2f30df0c2..2f45ce3219 100644 --- a/ci/kind-1.14-default.env +++ b/ci/kind-1.14-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.14.2 -export KIND_VERSION=0.3.0 +export KUBE_VERSION=1.14.3 +export KIND_VERSION=0.4.0 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" diff --git a/ci/kind-1.15-default.env b/ci/kind-1.15-default.env new file mode 
100644 index 0000000000..f42d981afe --- /dev/null +++ b/ci/kind-1.15-default.env @@ -0,0 +1,11 @@ +export KUBE_VERSION=1.15.0 +export KIND_VERSION=0.4.0 +export HELM_VERSION=2.12.3 +export KUBEVAL_VERSION=0.7.3 +export PATH="$PWD/bin:$PATH" + +export Z2JH_HELM_ARGS="-f minikube-config.yaml" +export DISABLE_TEST_NETPOL=1 +export INSTALL_CALICO=0 + +export RUN_PUBLISH_SCRIPT=0 From 0528653b119804fbefe485d0fe982d7ce83f0bb2 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Mon, 26 Aug 2019 23:31:09 +0800 Subject: [PATCH 08/77] bump kind to 0.5.1 --- ci/kind-1.12-default.env | 4 ++-- ci/kind-1.13-default.env | 4 ++-- ci/kind-1.14-default.env | 4 ++-- ci/kind-1.15-default.env | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ci/kind-1.12-default.env b/ci/kind-1.12-default.env index c51aef7602..41f9075004 100644 --- a/ci/kind-1.12-default.env +++ b/ci/kind-1.12-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.12.9 -export KIND_VERSION=0.4.0 +export KUBE_VERSION=1.12.10 +export KIND_VERSION=0.5.1 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" diff --git a/ci/kind-1.13-default.env b/ci/kind-1.13-default.env index 8c263e7f8e..506fa99ad7 100644 --- a/ci/kind-1.13-default.env +++ b/ci/kind-1.13-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.13.7 -export KIND_VERSION=0.4.0 +export KUBE_VERSION=1.13.10 +export KIND_VERSION=0.5.1 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env index 2f45ce3219..32ce01a12c 100644 --- a/ci/kind-1.14-default.env +++ b/ci/kind-1.14-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.14.3 -export KIND_VERSION=0.4.0 +export KUBE_VERSION=1.14.6 +export KIND_VERSION=0.5.1 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" diff --git a/ci/kind-1.15-default.env b/ci/kind-1.15-default.env index f42d981afe..e9a031e390 100644 --- a/ci/kind-1.15-default.env +++ b/ci/kind-1.15-default.env @@ -1,5 +1,5 @@ -export KUBE_VERSION=1.15.0 -export KIND_VERSION=0.4.0 +export KUBE_VERSION=1.15.3 +export KIND_VERSION=0.5.1 export HELM_VERSION=2.12.3 export KUBEVAL_VERSION=0.7.3 export PATH="$PWD/bin:$PATH" From aaed8e8701e2e48263221a418e2cba9b44ab8dcf Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Tue, 27 Aug 2019 22:47:55 +0800 Subject: [PATCH 09/77] actually enable 1.15 kind-ci --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 04d793fd89..35f1610dc0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,7 @@ env: - SCENARIO=1.13-default RUNNER=minikube - SCENARIO=1.12-netpol RUNNER=minikube - SCENARIO=1.11-default RUNNER=minikube + - SCENARIO=1.15-default RUNNER=kind - SCENARIO=1.14-default RUNNER=kind - SCENARIO=1.13-default RUNNER=kind - SCENARIO=1.12-default RUNNER=kind From 9ac3d4bef9ec6c303f5800faabc2c953f7316f79 Mon Sep 17 00:00:00 2001 From: Chia-liang Kao Date: Tue, 27 Aug 2019 23:36:58 +0800 Subject: [PATCH 10/77] fix travis-run-kind --- ci/vagrant-run-kind.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/vagrant-run-kind.sh b/ci/vagrant-run-kind.sh index 4a5a5f5f59..06f56d19af 100755 --- a/ci/vagrant-run-kind.sh +++ b/ci/vagrant-run-kind.sh @@ -2,7 +2,8 @@ # Run this inside vagrant to test the travis scripts set -eux -export SCENARIO=1.14 +export SCENARIO=1.15-default +export RUNNER=kind export TRAVIS_BRANCH=master export TRAVIS_PULL_REQUEST=true export TRAVIS_COMMIT_RANGE=`git rev-parse --short 
origin/master`..`git rev-parse --short HEAD`
 
 pip3 install --no-cache-dir -r dev-requirements.txt
 . ./ci/kind-${SCENARIO}.env
 ./ci/install-kind.sh
 ./ci/travis-script.sh

From 2b369db9b9f72b881050d5d429422c81a9c7e2ae Mon Sep 17 00:00:00 2001
From: Chia-liang Kao
Date: Tue, 27 Aug 2019 23:54:07 +0800
Subject: [PATCH 11/77] check why chartpress is not detecting existing images

---
 ci/travis-script.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/ci/travis-script.sh b/ci/travis-script.sh
index 408d0a06c5..bde3248fa6 100755
--- a/ci/travis-script.sh
+++ b/ci/travis-script.sh
@@ -10,6 +10,10 @@ if [[ ]]; then
   ./ci/publish-chart.sh
 else
+
+  echo "is chartpress detection failing?"
+  python3 -c 'import docker; print(docker.from_env().images.get_registry_data("jupyterhub/k8s-hub:0.9-c9f80ce").id)'
+
   chartpress --commit-range ${TRAVIS_COMMIT_RANGE}
 fi
 git diff

From c36a00493442e2a6751d4fc9d49730908ccee204 Mon Sep 17 00:00:00 2001
From: Chia-liang Kao
Date: Wed, 28 Aug 2019 00:40:33 +0800
Subject: [PATCH 12/77] Disable registry mirror to make chartpress cache check
 work

---
 .travis.yml        | 1 +
 ci/docker-fixes.sh | 8 ++++++++
 2 files changed, 9 insertions(+)
 create mode 100755 ci/docker-fixes.sh

diff --git a/.travis.yml b/.travis.yml
index 35f1610dc0..cea1c19bc1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,6 +7,7 @@ git:
 services:
   - docker
 install:
+  - ./ci/docker-fixes.sh
   - pip3 install --no-cache-dir -r dev-requirements.txt
   - . "ci/${RUNNER}-${SCENARIO}.env"
   - ./ci/install-${RUNNER}.sh

diff --git a/ci/docker-fixes.sh b/ci/docker-fixes.sh
new file mode 100755
index 0000000000..8523cb9625
--- /dev/null
+++ b/ci/docker-fixes.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -ex
+
+# https://github.com/moby/moby/issues/39120
+sudo cat /etc/docker/daemon.json
+echo '{"mtu": 1460}' | sudo dd of=/etc/docker/daemon.json
+sudo systemctl restart docker
+docker ps -a

From 1ba69aa03e70353984af52e486114554cb544766 Mon Sep 17 00:00:00 2001
From: Chia-liang Kao
Date: Wed, 28 Aug 2019 00:56:53 +0800
Subject: [PATCH 13/77] helm can be installed while the kind node gets ready

---
 ci/install-kind.sh | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/ci/install-kind.sh b/ci/install-kind.sh
index e8c04f8521..94c1ca3c99 100755
--- a/ci/install-kind.sh
+++ b/ci/install-kind.sh
@@ -37,11 +37,6 @@ echo "starting cluster with kind"
 $PWD/bin/kind create cluster --image kindest/node:v${KUBE_VERSION}
 export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)"
 
-echo "waiting for kubernetes"
-JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
-until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
-  sleep 1
-done
 kubectl get nodes
 
 echo "installing helm"

From 204b7ce9b0ce48b807227fffad9633ceb4098c2f Mon Sep 17 00:00:00 2001
From: Chia-liang Kao
Date: Wed, 28 Aug 2019 00:57:20 +0800
Subject: [PATCH 14/77] Revert "check why chartpress is not detecting existing
 images"

This reverts commit eede891479179d0a2dbc8a93deb2076d7aeb3ae0.

---
 ci/travis-script.sh | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/ci/travis-script.sh b/ci/travis-script.sh
index bde3248fa6..408d0a06c5 100755
--- a/ci/travis-script.sh
+++ b/ci/travis-script.sh
@@ -10,10 +10,6 @@ if [[ ]]; then
   ./ci/publish-chart.sh
 else
-
-  echo "is chartpress detection failing?"
- python3 -c 'import docker; print(docker.from_env().images.get_registry_data("jupyterhub/k8s-hub:0.9-c9f80ce").id)' - chartpress --commit-range ${TRAVIS_COMMIT_RANGE} fi git diff From 320470f177e0606b0870a9e505b82115c6fba460 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 23 Sep 2019 19:56:16 +0200 Subject: [PATCH 15/77] Big CI/CD and contributing docs rework ref: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1422 --- .binder/README.md | 3 + {binder => .binder}/environment.yml | 3 - .circleci/README.md | 8 + .circleci/config.yml | 3 - .gitignore | 3 +- .travis.yml | 89 +++- CONTRIBUTING.md | 411 ++++++------------ RELEASE.md | 203 +++++++++ Vagrantfile | 20 + chartpress.yaml | 2 +- ci/Vagrantfile | 28 -- ci/common | 109 +++++ ci/docker-fixes.sh | 8 - ci/install-kind.sh | 58 --- ci/install-minikube.sh | 97 ----- ci/kind-1.12-default.env | 11 - ci/kind-1.13-default.env | 11 - ci/kind-1.14-default.env | 11 - ci/kind-1.15-default.env | 11 - ci/kind-config.yaml | 11 + ci/kind-load-docker-images.py | 94 ++++ ci/minikube-1.11-default.env | 15 - ci/minikube-1.12-netpol.env | 12 - ci/minikube-1.13-default.env | 12 - ci/{publish-chart.sh => publish} | 4 +- ci/{id_rsa.enc => publish-id_rsa.enc} | Bin ci/start-k8s | 100 +++++ ci/test | 30 ++ ci/test.sh | 80 ---- ci/travis-docker-fix | 13 + ci/travis-script.sh | 17 - ci/upgrade | 35 ++ ci/vagrant-run-kind.sh | 14 - ci/vagrant-run.sh | 13 - ci/xenial-setup.sh | 33 -- dev | 38 ++ ...kube-netpol.yaml => dev-config-netpol.yaml | 0 minikube-config.yaml => dev-config.yaml | 0 dev-requirements.txt | 24 +- images/hub/Dockerfile | 2 + images/singleuser-sample/Dockerfile | 2 +- jupyterhub/Chart.yaml | 4 +- jupyterhub/values.yaml | 8 +- tests/README.md | 3 + tests/conftest.py | 8 +- tests/test_hub_is_ready.py | 16 - tests/test_spawn.py | 90 +++- tools/templates/lint-and-validate.py | 37 +- vagrant-vm-setup.sh | 21 + 49 files changed, 1016 insertions(+), 809 deletions(-) create mode 100644 .binder/README.md rename {binder => .binder}/environment.yml (63%) create mode 100644 .circleci/README.md create mode 100644 RELEASE.md create mode 100644 Vagrantfile delete mode 100644 ci/Vagrantfile create mode 100755 ci/common delete mode 100755 ci/docker-fixes.sh delete mode 100755 ci/install-kind.sh delete mode 100755 ci/install-minikube.sh delete mode 100644 ci/kind-1.12-default.env delete mode 100644 ci/kind-1.13-default.env delete mode 100644 ci/kind-1.14-default.env delete mode 100644 ci/kind-1.15-default.env create mode 100644 ci/kind-config.yaml create mode 100755 ci/kind-load-docker-images.py delete mode 100644 ci/minikube-1.11-default.env delete mode 100644 ci/minikube-1.12-netpol.env delete mode 100644 ci/minikube-1.13-default.env rename ci/{publish-chart.sh => publish} (91%) rename ci/{id_rsa.enc => publish-id_rsa.enc} (100%) create mode 100755 ci/start-k8s create mode 100755 ci/test delete mode 100755 ci/test.sh create mode 100755 ci/travis-docker-fix delete mode 100755 ci/travis-script.sh create mode 100755 ci/upgrade delete mode 100755 ci/vagrant-run-kind.sh delete mode 100755 ci/vagrant-run.sh delete mode 100755 ci/xenial-setup.sh create mode 100755 dev rename minikube-netpol.yaml => dev-config-netpol.yaml (100%) rename minikube-config.yaml => dev-config.yaml (100%) create mode 100644 tests/README.md delete mode 100644 tests/test_hub_is_ready.py create mode 100644 vagrant-vm-setup.sh diff --git a/.binder/README.md b/.binder/README.md new file mode 100644 index 0000000000..5914e4391b --- /dev/null +++ b/.binder/README.md @@ -0,0 +1,3 @@ +# 
What is this folder about?
+
+It contains the dependency information required by [a notebook](doc/ntbk/draw_function.ipynb) that [we reference](doc/source/cost.rst) in our documentation to run on mybinder.org. All it takes is the click of [a link](http://mybinder.org/v2/gh/jupyterhub/zero-to-jupyterhub-k8s/master?filepath=doc/ntbk/draw_function.ipynb) thanks to this.

diff --git a/binder/environment.yml b/.binder/environment.yml
similarity index 63%
rename from binder/environment.yml
rename to .binder/environment.yml
index e00b478e8f..7838b30cb0 100644
--- a/binder/environment.yml
+++ b/.binder/environment.yml
@@ -1,6 +1,3 @@
-# Dependencies for the guides cloud cost estimator notebook
-# http://mybinder.org/v2/gh/jupyterhub/zero-to-jupyterhub-k8s/master?filepath=doc/ntbk/draw_function.ipynb
-
 # Need to install bqplot with conda so it calls the javascript extension.
 name: bqplot
 channels:

diff --git a/.circleci/README.md b/.circleci/README.md
new file mode 100644
index 0000000000..882bb52d92
--- /dev/null
+++ b/.circleci/README.md
@@ -0,0 +1,8 @@
+# What is this folder about?
+
+We use CircleCI to build documentation previews for PRs, as configured through
+[.circleci/config.yml]. This allows us to easily preview documentation changes
+in a PR in its final form before the PR is merged.
+
+When a PR is merged, [readthedocs.yml](readthedocs.yml) will help ReadTheDocs
+build and publish it on https://z2jh.jupyter.org.

diff --git a/.circleci/config.yml b/.circleci/config.yml
index af5a7e5587..c79ebbced7 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,6 +1,3 @@
-# This CircleCI build lets us preview the documentation inside PRs before they
-# are merged! And when they are, the readthedocs.yml file will help ReadTheDocs
-# build and publish it on https://z2jh.jupyter.org.
 version: 2
 jobs:
   build_docs:

diff --git a/.gitignore b/.gitignore
index c3bbe92c58..509d2f3d9d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,9 @@
 ### Zero to JupyterHub Kubernetes ###
 tools/templates/rendered-templates/
 bin/
-ci/.vagrant
+.vagrant/
 tools/github.sqlite
+ci/daemonset-calico-node.yaml
 
 .vscode

diff --git a/.travis.yml b/.travis.yml
index cea1c19bc1..150c552f9c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,28 +3,81 @@ language: python
 python:
   - 3.6
 git:
+  ## depth set to false overrides the Travis default behavior of using shallow
+  ## clones with depth 50, which can cause issues
+  ##
+  ## ref: https://github.com/jupyterhub/chartpress#shallow-clones
+  ##
   depth: false
 services:
   - docker
+## stages declares and orders stages
+##
+## ref: https://docs.travis-ci.com/user/build-stages/#build-stages-and-deployments
+##
+stages:
+  - name: lint and validate
+  - name: test
+  - name: publish
+    ## if conditions give us control over whether the stage should run
+    ##
+    ## ref: https://docs.travis-ci.com/user/conditions-v1
+    ##
+    if: >
+      branch in (master) AND
+      type in (push)
 install:
   - pip3 install --no-cache-dir -r dev-requirements.txt
+  - . 
ci/common ci +stage: test +script: + - setup_kubectl + - setup_kind + - setup_helm + - ./ci/travis-docker-fix + - ./ci/start-k8s + - ./ci/upgrade + - ./ci/test env: - # Different scenarios described in - # /ci/minikube-${SCENARIO}.env - matrix: - - SCENARIO=1.13-default RUNNER=minikube - - SCENARIO=1.12-netpol RUNNER=minikube - - SCENARIO=1.11-default RUNNER=minikube - - SCENARIO=1.15-default RUNNER=kind - - SCENARIO=1.14-default RUNNER=kind - - SCENARIO=1.13-default RUNNER=kind - - SCENARIO=1.12-default RUNNER=kind - global: - - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= - - secure: BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= + ## NOTE: The environment variables will be expanded to multiple jobs. For + ## additional individual jobs, only the first entry is used. + ## + ## ref: https://docs.travis-ci.com/user/build-stages/#build-stages-and-build-matrix-expansion + ## + ## + ## KUBE_VERSION should match a released kindest/node image tag, but they are + ## currently not automatically published. 
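+  ##
+  ## (hedged aside: one way to check which kindest/node tags have been
+  ## released is the Docker Hub API, for example
+  ##   curl -s "https://hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" | jq -r '.results[].name'
+  ## where the endpoint and jq filter are assumptions, not something this
+  ## configuration relies on)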
+ ## + ## ref: https://hub.docker.com/r/kindest/node/tags + ## ref: https://github.com/kubernetes-sigs/kind/issues/197 + ## + # - KUBE_VERSION=1.16 + - KUBE_VERSION=1.15.3 + - KUBE_VERSION=1.14.6 + - KUBE_VERSION=1.13.10 + - KUBE_VERSION=1.12.10 + +jobs: + ## include additional individual jobs + ## + include: + - stage: lint and validate + script: + - setup_helm + - setup_kubeval + - python3 tools/templates/lint-and-validate.py --kubernetes-versions $LINT_KUBE_VERSIONS + env: [] + - stage: publish + script: + - setup_helm + - setup_git_crypt + - ./ci/travis-docker-fix + - ./ci/publish + env: + ## encrypted environment variables, used on push to master in the + ## publish script to in turn decrypt a SSH key + ## + ## ref: https://docs.travis-ci.com/user/environment-variables/#encrypting-environment-variables + - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= + - secure: BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ae3c82a20d..a58eddb48b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,341 +1,180 @@ # Contributing -Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter contributor guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html). +Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter +contributor +guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html). -## Setting up minikube for local development +## Local development for a code contribution -We recommend using [minikube](https://github.com/kubernetes/minikube) for local -development. +### Prepare git -1. [Download & install minikube](https://github.com/kubernetes/minikube#installation). +1. Install [git](https://www.git-scm.com/). To verify it is installed, run this + from a terminal. - For MacOS: You may install minikube using Homebrew `brew cask install minikube` or - from a binary at https://github.com/kubernetes/minikube/releases. - If you need to install Docker Community Edition (CE) for Mac, please - follow the [Docker instructions](https://store.docker.com/editions/community/docker-ce-desktop-mac). - -2. [Download & install helm](https://github.com/helm/helm#install). 
- - You may install Helm using one of the following steps: - - * With the following curl command: - - ``` - curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash - ``` - * From one of the binaries at https://github.com/helm/helm/releases - * For MacOS, using Homebrew: `brew install kubernetes-helm` - -3. Start minikube. - - For minikube version 0.26 and higher: ```bash - minikube start + git version ``` - For older minikube versions: - ```bash - minikube start --extra-config=apiserver.Authorization.Mode=RBAC - ``` +1. Make a GitHub fork of [this + repository](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) by creating + and then logging into your GitHub account and clicking the Fork button. - Note on troubleshooting: if you recently upgraded minikube and are now seeing - errors, you may need to clear out the `~/.minikube` and `~/.kube` directories - and reboot. - -4. Use the docker daemon inside minikube for building: - ```bash - eval $(minikube docker-env) - ``` +1. Clone your fork to your local computer. -5. Clone the zero-to-jupyterhub repo: ```bash - git clone git@github.com:jupyterhub/zero-to-jupyterhub-k8s.git + git clone http://github.com//zero-to-jupyterhub-k8s.git cd zero-to-jupyterhub-k8s - ``` -6. Create a virtualenv & install the libraries required for builds to happen: - ```bash - python3 -m venv . - source bin/activate - python3 -m pip install -r dev-requirements.txt + # make it easy to reference the projects GitHub repository as "upstream" + git remote add upstream https://github.com/jupyterhub/zero-to-jupyterhub-k8s + + # make it obvious what you reference by renaming a reference to your + # personal GitHub repository to "fork" + git remote rename origin fork ``` -7. Now run `chartpress` to build the requisite docker images inside minikube: - ```bash - chartpress - ``` +### Prepare Virtual Machine software - This will build the docker images inside minikube & modify - `jupyterhub/values.yaml` with the appropriate values to make the chart - installable! +A `Vagrantfile` is a way to prepare a Virtual Machine (VM), and we [have +one](Vagrantfile) to prepare a VM for local development! We can use it to get +a VM up and running, enter it with SSH, develop and run tests, and later shut +down without influencing our system. -8. Configure helm and minikube for RBAC: - ```bash - kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default - kubectl --namespace kube-system create sa tiller - kubectl create clusterrolebinding tiller \ - --clusterrole cluster-admin \ - --serviceaccount=kube-system:tiller - helm init --service-account tiller - ``` +1. Install VirtualBox by [downloading and running an + installer](https://www.virtualbox.org/wiki/Downloads). -9. Install / Upgrade JupyterHub Chart! - ```bash - helm upgrade --wait --install --namespace=hub hub jupyterhub/ -f minikube-config.yaml - ``` +1. Install Vagrant by [downloading and running an + installer](https://www.vagrantup.com/downloads.html). - You can easily change the options in `minikube-config.yaml` file to test what - you want, or create another `config.yaml` file & pass that as an additional - `-f config.yaml` file to the `helm upgrade` command. +### Develop and run tests -10. Retrieve the URL for your instance of JupyterHub: +1. Start a prepared VM and SSH into it. ```bash - minikube service --namespace=hub proxy-public - ``` - - Navigate to the URL in your browser. You should now have JupyterHub running - on minikube. 
+ ## if you have suspended a VM earlier, use "vagrat resume" instead + vagrant up -11. Make the changes you want. + ## enter a SSH session with the VM + vagrant ssh + ``` - To view your changes on the running development instance of JupyterHub: +2. Develop and test within the VM + + ```bash + ## run within the SSH session + cd zero-to-jupyterhub-k8s + + ## initialize some environment variables etc (notice the leading dot) + . ./dev init - - Re-run step 7 if you changed anything under the `images` directory - - Re-run step 9 if you changed things only under the `jupyterhub` directory. + ## start a k8s cluster + ./dev start-k8s + ## install/upgrade the helm chart + ./dev upgrade -## Travis CI tests + ## see the results + # visit http://localhost:8090 -Travis tests are automatically run on every pull request. -Since the Travis environment is not accessible it can be difficult to debug CI failures. -A [`Vagrantfile`](ci/Vagrantfile) which partially simulates the Travis environment is included, and may be useful when updating the CI deployments, though it is by no means an exact replica. + ## make a change + # ... -1. Start and login to the Vagrant box: + ## run tests + ./dev test + ``` + +3. Close the SSH session ```bash - cd ci - vagrant up - vagrant ssh + ## exit the SSH session + exit + vagrant suspend + # vagrant halt + # vagrant destroy ``` -2. Run the test script. - Optionally edit `SCENARIO` in [`./ci/vagrant-run.sh`](./ci/vagrant-run.sh) - if you want to test a different scenario +> **NOTE:** You can also use `vagrant destroy` to reset the VM state entirely, +> but the start-k8s script will reset the k8s cluster if you have the same k8s +> version set as previous so it should be fine to just `halt` and do `up` again +> later. - ```bash - cd /zero-to-jupyterhub-k8s - ./ci/vagrant-run.sh - ``` +### Debugging issues +Various things can go wrong while working with the local development +environment, here are some typical issues and what to do about them. ---- +#### Network errors -## Best practices +Did you get an error like one of these below? -We strive to follow the guidelines provided by [kubernetes/charts](https://github.com/kubernetes/charts/blob/master/REVIEW_GUIDELINES.md) and the [Helm Chart Best Practices Guide](https://github.com/kubernetes/helm/tree/master/docs/chart_best_practices) they refer to. +```shell +# while installing docker +curl: (6) Could not resolve host: download.docker.com -## Releasing a new version of the helm chart +# while running pip install +Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution',)': /simple/chartpress/ -The following steps can be followed to release a new version of the Helm Chart. -Presently, we expect a release approximately every 5-7 weeks. +# while running apt-get install while building a docker image with chartpress +E: Failed to fetch http://archive.ubuntu.com/ubuntu/pool/main/r/rtmpdump/librtmp1_2.4+20151223.gitfa8646d.1-1_amd64.deb Could not connect to archive.ubuntu.com:80 (91.189.88.174). - connect (113: No route to host) Could not connect to archive.ubuntu.com:80 (91.189.88.31). - connect (113: No route to host) [IP: 91.189.88.174 80] +# [...] 
+subprocess.CalledProcessError: Command '['docker', 'build', '-t', 'jupyterhub/k8s-hub:0.9-217f798', 'images/hub', '--build-arg', 'JUPYTERHUB_VERSION=git+https://github.com/jupyterhub/jupyterhub@master']' returned non-zero exit status 100. +# while installing a dependency for our k8s cluster +Unable to connect to the server: dial tcp: lookup docs.projectcalico.org on 127.0.0.53:53: read udp 127.0.0.1:56409->127.0.0.53:53: i/o timeout +``` -### Create an issue for the new release +Network and DNS issues are typically symptoms of unreliable internet (as +experienced by the VirtualMachine). You can recognize such issues if you get +errors like the ones above. -Use this issue to coordinate efforts and keep track of progress. You can -copy / paste the raw Markdown from the following list, which will be covered -in more detail below. +As you may notice, typical keywords associated with network errors are: -``` -Title: Release {{release-name}} -Content: - -This issue will be used to coordinate the next release of the helm -chart, {{release-name}}. Instructions for creating the release can be found in -[CONTRIBUTING.md](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CONTRIBUTING.md#releasing-a-new-version-of-the-helm-chart). -Below is the checklist for this release. - -- [ ] Code, tests, and documentation to support a release are stable. -- [ ] Make a CHANGELOG -- [ ] Generate and add the list of contributors -- [ ] Build and push a new Docker image to DockerHub -- [ ] Commit version bump in `Chart.yaml` and `Values.yaml` -- [ ] Update references in documentation to the new version (note: documentation - should be stable and there should be no anticipated major changes to content). -- [ ] Confirm that a new deployment using the updated instructions works -- [ ] Create and push a new tag for this release -- [ ] Create and publish a new GitHub release -- [ ] Write / publish a blog post based largely off of the CHANGELOG -- [ ] Set ReadTheDocs to begin using `latest` by default -- [ ] Celebrate! -``` +- *resolve host* +- *name resolution* +- *timeout* +- *no route to host* -As there are often many documentation improvements following the release of -a new version, we set ReadTheDocs to serve `latest/` until the first docs are -written that are next-version-specific. As soon as documentation must be -written for the **next** version of the Helm Chart, you can use the following -checklist: +#### Unable to listen on port + +Did you get an error like this? ``` -- [ ] Create a new tag for a documentation release (same release name with `-doc` at the end) -- [ ] Publish this tag -- [ ] Set ReadTheDocs to point to the **new tag** by default instead of `latest` -- [ ] Continue making next-version-specific changes to the documentation. +Unable to listen on port 8090: Listeners failed to create with the following errors: [Unable to create listener: Error listen tcp4 127.0.0.1:8090: bind: address already in use Unable to create listener: Error listen tcp6 [::1]:8090: bind: address already in use] ``` -**Note**: Switching the documentation to `latest` after a new release is a stop-gap -measure to accomodate the fact that the documentation is still changing relatively -rapidly. Once the documentation as a whole stabilizes (after a few more release -cycles), we plan to begin switching straight from the last version to the new version -of documentation without going through latest. +The key to solving this is understanding it! 
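+
+(A hedged first check: to see which process is already bound to the port,
+something like the following usually helps. `ss` ships with iproute2; the
+exact flags and output are assumptions that may vary between systems.)
+
+```bash
+# list listening TCP sockets and their owning processes, filtered on the port
+sudo ss -ltnp | grep 8090
+```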
+
+We need to shuttle traffic from your computer to your Kubernetes cluster's
+Service that in turn shuttles the traffic to the pod of relevance. While doing
+so, we can end up with issues like the one above if we end up asking for traffic
+to go to more than one place.
+
+Let's look at how we need traffic to be shuttled!
+
+1. *Traffic entering your computer should go to your VM.*
+
+   When you run `vagrant up`, your computer will read the
+   [Vagrantfile](Vagrantfile) and from that conclude it should shuttle traffic
+   incoming to your computer on port `8090` to your VM on port `8080`.
+
+2. *Traffic entering your VM should go to your Kubernetes cluster's Service named `proxy-public`.*
-### Make a CHANGELOG
-
-This needs to be manually created, following the format of
-current [CHANGELOG](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CHANGELOG.md). The general structure should be:
-
-* A short description of the general theme / points of interest for
-  this release.
-* Breaking changes + a link to the [upgrade instructions](https://zero-to-jupyterhub.readthedocs.io/en/v0.5-doc/upgrading.html) in the docs
-* A list of features with brief descriptions under each.
-* The contributor list mentioned in the section below.
-
-### Add list of contributors
-
-We try to recognize *all* sorts of contributors, rather
-than just code committers.
-
-Use the script in `tools/contributors.py` to list all
-contributions (anyone who made a commit or a comment)
-since the latest release. For each
-release, you'll need to find the versions of all repos
-involved:
-
-* [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
-* [KubeSpawner](https://github.com/jupyterhub/kubespawner)
-* [JupyterHub](https://github.com/jupyterhub/jupyterhub)
-* [OAuthenticator](https://github.com/jupyterhub/oauthenticator)
-
-Edit `contributors.py` to have the appropriate dates
-for each of these versions. Then, run the script and paste
-the output into the changelog. For an
-example, see [the v0.5 list of contributors](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/v0.5/CHANGELOG.md#contributors).
+   When you run `./dev upgrade`, that in turn runs the `kubectl port-forward`
+   command to shuttle traffic from port `8080` to the `proxy-public` Kubernetes
+   Service (port `80`) that we want to communicate with; it is the gate to speak
+   with the hub and proxy even though it is also possible to speak directly to
+   the hub.
+
+In short, the traffic is routed from computer (8090), to the VM (8080), to the
+Kubernetes `proxy-public` Service (80).
-### Push built images to DockerHub + bump version
-
-The JupyterHub helm chart uses a Docker image that's registered
-on DockerHub. When releasing a new version of the helm chart,
-you also need to push a new version of this image. To do so,
-you must have:
+
+The reason you may run into an issue is if there is another service already
+listening on traffic arriving on a given port. Then you would need to either
+shut it down or route traffic differently.
-
-1. Docker running locally
-2. An account on DockerHub that you are logged into from
-   your local docker installation.
-3. Push rights for images under `jupyterhub/` on
-   the DockerHub registry.
-4. Push rights to the `jupyterhub/helm-chart` repository on GitHub.
-5. A local SSH key that will let you push to the `helm-chart` repository
-   on GitHub. See [these instructions](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for information on how to create this. 
- -**Note**: If you don't have a DockerHub account, or don't have push rights to -the DockerHub registry, open an issue and ping one of the core devs. - -If you have all of this, you can then: - -1. Check out latest master of [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) -2. Run `chartpress --tag --push --publish-chart`. - * For example, to relase `v0.5`, you would run - `chartpress --tag v0.5 --push --publish-chart`. - Note the `v` before version. -3. This will also modify the files `Chart.yaml` and `values.yaml`. - Commit these changes. -4. Look through the [z2jh documentation](https://zero-to-jupyterhub.readthedocs.io) and find any references to - the Helm Chart version (e.g., look for the flag `--version`, as well - as for all `helm upgrade` and `helm install` commands). - Update these references to point to the new version you are releasing. -5. Make a PR to the z2jh repository and notify the team to take a look. - -After this PR gets merged: - -1. Go to https://zero-to-jupyterhub.readthedocs.io/en/latest and - deploy a JupyterHub using the instructions (make sure that - you're reading from `/en/latest`). Make sure your latest - changes are present, and that the JupyterHub successfully deploys - and functions properly. - -Next, move on to making a GitHub release, described below. - -### Tagging and making a GitHub release - -Now that our Docker image is pushed and we have updated the documentation -for z2jh, it's time to make a new GitHub release. To do this, you must have: - -1. Push rights to the `jupyterhub/zero-to-jupyterhub-k8s` repo - -You will need to make a git tag, and then create a GitHub release. - -1. Make sure you're on branch `master` with your latest changes from - the section above pulled. -2. Make a git tag with: - ``` - git tag -a - ``` - - Where `` should be the new version that you're releasing. - Note the `v` before the version number. - - Git will ask you to include a message with the tag. - Paste the entire contents of the CHANGELOG for this particular release. - An easy way to do this is to paste the contents in a text file, and - then refer to that text file with the call to commit: - `git tag -a -F ` -3. Push the tags to the `jupyterhub/zero-to-jupyterhub-k8s` repo with - `git push --tags`. - Note that `` is whatever your local git uses to refer - to the `jupyerhub/` organization's repository (e.g., `official` - or `upstream`) -3. Make a **GitHub Release**: - * go to https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases and click 'Draft new release'. - * The title should be the new version, followed by the name of the cricketer for the release. Like so:`v0.5: "Hamid Hassan"`. - * The description should include the entire changelog entry for this release. - * Make sure the title/description/tag name look correct, and then click - on `Publish Release`. - -You've just made a GitHub release! - - -### RTD update - -Wait a few hours to let the release 'cool' and make sure that links, -webpages, etc have updated. Then, update our documentation settings on -readthedocs to show `latest` by default. This marks the official -'release' of the version! - -### Last step - release a blog post and tell the world! - -The final step is to release a blog post. This doesn't have to be -done by the person who performed all of the above actions. - -To release a blog post for the new version, start a draft on the Jupyter Medium -blog. 
Copy/paste the section of the CHANGELOG corresponding to the new -release, then make minor modifications to make it more blog-friendly. +## Helm chart practices -Don't forget to tell the JupyterHub community about the new release, and -to encourage people to talk about it on social media! - -That's it! Congratulations on making a new release of JupyterHub! - -### Extra step - release a documentation release - -It is common that documentation changes are made shortly after a new release. -To handle this, we often create a documentation release a few days after a -major release. - -To do this, confirm that all changes to the documentation -are merged into master, then create a new tag with the same release name and -`-doc` appended to the end. Create a GitHub release with the new tag and a -description that points to the original release description. Finally, set -our ReadTheDocs settings to point users to the new `-doc` tag by default instead -of `latest`. +We strive to follow the guidelines provided by +[kubernetes/charts](https://github.com/kubernetes/charts/blob/master/REVIEW_GUIDELINES.md) +and the [Helm chart best practices +guide](https://github.com/kubernetes/helm/tree/master/docs/chart_best_practices). diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..158f24f4b0 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,203 @@ +# Releasing a new version of the helm chart + +The following steps can be followed to release a new version of the Helm Chart. + +## Create an issue for the new release + +Use this issue to coordinate efforts and keep track of progress. You can +copy / paste the raw Markdown from the following list, which will be covered +in more detail below. + +``` +Title: Release {{release-name}} +Content: + +This issue will be used to coordinate the next release of the helm +chart, {{release-name}}. Instructions for creating the release can be found in +[CONTRIBUTING.md](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CONTRIBUTING.md#releasing-a-new-version-of-the-helm-chart). +Below is the checklist for this release. + +- [ ] Code, tests, and documentation to support a release are stable. +- [ ] Make a CHANGELOG +- [ ] Generate and add the list of contributors +- [ ] Build and push a new Docker image to DockerHub +- [ ] Commit version bump in `Chart.yaml` and `Values.yaml` +- [ ] Update references in documentation to the new version (note: documentation + should be stable and there should be no anticipated major changes to content). +- [ ] Confirm that a new deployment using the updated instructions works +- [ ] Create and push a new tag for this release +- [ ] Create and publish a new GitHub release +- [ ] Write / publish a blog post based largely off of the CHANGELOG +- [ ] Set ReadTheDocs to begin using `latest` by default +- [ ] Celebrate! +``` + +As there are often many documentation improvements following the release of +a new version, we set ReadTheDocs to serve `latest/` until the first docs are +written that are next-version-specific. As soon as documentation must be +written for the **next** version of the Helm Chart, you can use the following +checklist: + +``` +- [ ] Create a new tag for a documentation release (same release name with `-doc` at the end) +- [ ] Publish this tag +- [ ] Set ReadTheDocs to point to the **new tag** by default instead of `latest` +- [ ] Continue making next-version-specific changes to the documentation. 
+```
+
+**Note**: Switching the documentation to `latest` after a new release is a stop-gap
+measure to accommodate the fact that the documentation is still changing relatively
+rapidly. Once the documentation as a whole stabilizes (after a few more release
+cycles), we plan to begin switching straight from the last version to the new version
+of documentation without going through `latest`.
+
+## Make a CHANGELOG
+
+This needs to be manually created, following the format of
+the current [CHANGELOG](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CHANGELOG.md). The general structure should be:
+
+* A short description of the general theme / points of interest for
+  this release.
+* Breaking changes + a link to the [upgrade instructions](https://zero-to-jupyterhub.readthedocs.io/en/v0.5-doc/upgrading.html) in the docs.
+* A list of features with brief descriptions under each.
+* The contributor list mentioned in the section below.
+
+## Add list of contributors
+
+We try to recognize *all* sorts of contributors, rather
+than just code committers.
+
+Use the script in `tools/contributors.py` to list all
+contributions (anyone who made a commit or a comment)
+since the latest release. For each
+release, you'll need to find the versions of all repos
+involved:
+
+* [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
+* [KubeSpawner](https://github.com/jupyterhub/kubespawner)
+* [JupyterHub](https://github.com/jupyterhub/jupyterhub)
+* [OAuthenticator](https://github.com/jupyterhub/oauthenticator)
+
+Edit `contributors.py` to have the appropriate dates
+for each of these versions. Then, run the script and paste
+the output into the changelog. For an
+example, see [the v0.5 list of contributors](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/v0.5/CHANGELOG.md#contributors).
+
+
+## Push built images to DockerHub + bump version
+
+The JupyterHub helm chart uses a Docker image that's registered
+on DockerHub. When releasing a new version of the helm chart,
+you also need to push a new version of this image. To do so,
+you must have:
+
+1. Docker running locally
+2. An account on DockerHub that you are logged into from
+   your local docker installation.
+3. Push rights for images under `jupyterhub/` on
+   the DockerHub registry.
+4. Push rights to the `jupyterhub/helm-chart` repository on GitHub.
+5. A local SSH key that will let you push to the `helm-chart` repository
+   on GitHub. See [these instructions](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for information on how to create this.
+
+**Note**: If you don't have a DockerHub account, or don't have push rights to
+the DockerHub registry, open an issue and ping one of the core devs.
+
+If you have all of this, you can then:
+
+1. Check out the latest master of [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
+2. Run `chartpress --tag --push --publish-chart`.
+   * For example, to release `v0.5`, you would run
+     `chartpress --tag v0.5 --push --publish-chart`.
+     Note the `v` before the version number.
+3. This will also modify the files `Chart.yaml` and `values.yaml`.
+   Commit these changes.
+4. Look through the [z2jh documentation](https://zero-to-jupyterhub.readthedocs.io) and find any references to
+   the Helm Chart version (e.g., look for the flag `--version`, as well
+   as for all `helm upgrade` and `helm install` commands; see the example
+   right after this list).
+   Update these references to point to the new version you are releasing.
+5. Make a PR to the z2jh repository and notify the team to take a look.

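+
+For step 4, a search along the following lines can help locate version
+references (an illustrative sketch; the documentation paths and search
+patterns are assumptions, not an exhaustive list):
+
+```
+grep -rn -e '--version' -e 'helm upgrade' -e 'helm install' doc/source
+```
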
+
+After this PR gets merged:
+
+1. Go to https://zero-to-jupyterhub.readthedocs.io/en/latest and
+   deploy a JupyterHub using the instructions (make sure that
+   you're reading from `/en/latest`). Make sure your latest
+   changes are present, and that the JupyterHub successfully deploys
+   and functions properly.
+
+Next, move on to making a GitHub release, described below.
+
+## Tagging and making a GitHub release
+
+Now that our Docker image is pushed and we have updated the documentation
+for z2jh, it's time to make a new GitHub release. To do this, you must have:
+
+1. Push rights to the `jupyterhub/zero-to-jupyterhub-k8s` repo
+
+You will need to make a git tag, and then create a GitHub release.
+
+1. Make sure you're on branch `master` with your latest changes from
+   the section above pulled.
+2. Make a git tag with:
+   ```
+   git tag -a <version>
+   ```
+
+   Where `<version>` should be the new version that you're releasing.
+   Note the `v` before the version number.
+
+   Git will ask you to include a message with the tag.
+   Paste the entire contents of the CHANGELOG for this particular release.
+   An easy way to do this is to paste the contents in a text file, and
+   then refer to that text file with the call to commit:
+   `git tag -a <version> -F <path-to-file>`
+3. Push the tag to the `jupyterhub/zero-to-jupyterhub-k8s` repo with
+   `git push <remote-name> --tags`.
+   Note that `<remote-name>` is whatever your local git uses to refer
+   to the `jupyterhub/` organization's repository (e.g., `official`
+   or `upstream`).
+4. Make a **GitHub Release**:
+   * go to https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases and click 'Draft new release'.
+   * The title should be the new version, followed by the name of the cricketer for the release, like so: `v0.5: "Hamid Hassan"`.
+   * The description should include the entire changelog entry for this release.
+   * Make sure the title/description/tag name look correct, and then click
+     on `Publish Release`.
+
+You've just made a GitHub release!
+
+
+## ReadTheDocs update
+
+Wait a few hours to let the release 'cool' and make sure that links,
+webpages, etc. have updated. Then, update our documentation settings on
+ReadTheDocs to show `latest` by default. This marks the official
+'release' of the version!
+
+## Last step - release a blog post and tell the world!
+
+The final step is to release a blog post. This doesn't have to be
+done by the person who performed all of the above actions.
+
+To release a blog post for the new version, start a draft on the Jupyter Medium
+blog. Copy/paste the section of the CHANGELOG corresponding to the new
+release, then make minor modifications to make it more blog-friendly.
+
+Don't forget to tell the JupyterHub community about the new release, and
+to encourage people to talk about it on social media!
+
+That's it! Congratulations on making a new release of JupyterHub!
+
+## Extra step - release a documentation release
+
+It is common that documentation changes are made shortly after a new release.
+To handle this, we often create a documentation release a few days after a
+major release.
+
+To do this, confirm that all changes to the documentation
+are merged into master, then create a new tag with the same release name and
+`-doc` appended to the end. Create a GitHub release with the new tag and a
+description that points to the original release description. Finally, set
+our ReadTheDocs settings to point users to the new `-doc` tag by default instead
+of `latest`.

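+
+For example, a documentation release following `v0.5` could look like this
+(the tag name is illustrative, and `upstream` is assumed to be the remote
+pointing at the `jupyterhub/zero-to-jupyterhub-k8s` repository):
+
+```
+git tag -a v0.5-doc -m "Documentation updates following v0.5"
+git push upstream v0.5-doc
+```
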
+ diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000000..9f3e00faca --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,20 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.vm.box = "generic/ubuntu1804" + config.vm.provider "virtualbox" do |vb| + vb.memory = "3072" + vb.cpus = 2 + end + + config.vm.provider :libvirt do |lv| + lv.memory = 3072 + lv.cpus = 2 + end if Vagrant.has_plugin?('vagrant-libvirt') + + config.vm.network "forwarded_port", guest: 8080, host: 8090 + + config.vm.provision "shell", path: "vagrant-vm-setup.sh" + config.vm.synced_folder ".", "/home/vagrant/zero-to-jupyterhub-k8s" +end diff --git a/chartpress.yaml b/chartpress.yaml index 31b44966c1..5edf63cf18 100644 --- a/chartpress.yaml +++ b/chartpress.yaml @@ -9,7 +9,7 @@ charts: hub: valuesPath: hub.image buildArgs: - JUPYTERHUB_VERSION: 1.0.0 + JUPYTERHUB_VERSION: git+https://github.com/jupyterhub/jupyterhub@89b0c42 network-tools: valuesPath: singleuser.networkTools.image image-awaiter: diff --git a/ci/Vagrantfile b/ci/Vagrantfile deleted file mode 100644 index 5270af33fb..0000000000 --- a/ci/Vagrantfile +++ /dev/null @@ -1,28 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Simulate a travis Xenial environment for local travis configuration - -# Example: -# $ vagrant up -# $ vagrant ssh -# vagrant$ cd /zero-to-jupyterhub-k8s -# vagrant$ ./ci/vagrant-run.sh - -Vagrant.configure("2") do |config| - config.vm.box = "generic/ubuntu1604" - config.vm.provider "virtualbox" do |vb| - vb.memory = "3072" - vb.cpus = 2 - end - - config.vm.provider :libvirt do |lv| - lv.memory = 3072 - lv.cpus = 2 - end if Vagrant.has_plugin?('vagrant-libvirt') - - config.vm.network "forwarded_port", guest: 31212, host: 31212 - - config.vm.provision "shell", path: "xenial-setup.sh" - config.vm.synced_folder "../", "/zero-to-jupyterhub-k8s" -end diff --git a/ci/common b/ci/common new file mode 100755 index 0000000000..7763c6d6b0 --- /dev/null +++ b/ci/common @@ -0,0 +1,109 @@ +#!/bin/bash + +## common - source this file with ". ./ci/common" to set environment +## variables and make useful functions available. + +mkdir -p bin +export PATH="$PWD/bin:$PATH" + +## NOTE: export HUB_API_URL is required for it to be accessible from pytest +## +export HUB_API_URL=http://127.0.0.1:8080/hub/api + +## NOTE: We need to allow our CI system to override these env. variables +## +if [ -z ${KUBE_VERSION:-} ]; then + ## NOTE: KUBE_VERSION is limited by the available kindest/node images + ## + ## ref: https://hub.docker.com/r/kindest/node/tags + ## ref: https://github.com/kubernetes/kubernetes/releases + export KUBE_VERSION=1.13.10 +fi +if [ -z ${KIND_VERSION:-} ]; then + ## ref: https://github.com/kubernetes-sigs/kind/releases + export KIND_VERSION=0.5.1 +fi +if [ -z ${HELM_VERSION:-} ]; then + ## ref: https://github.com/helm/helm/releases + export HELM_VERSION=2.14.3 +fi +if [ -z ${KUBEVAL_VERSION:-} ]; then + ## ref: https://github.com/instrumenta/kubeval/releases + export KUBEVAL_VERSION=0.14.0 +fi + +## Valid versions to list under LINT_KUBE_VERSIONS are those in the +## kubernetes-json-schema repoistory, used by kubeval. +## +## ref: https://github.com/instrumenta/kubernetes-json-schema +## +if [ -z ${LINT_KUBE_VERSIONS:-} ]; then + export LINT_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0 +fi + +## NOTE: The setup_... functions cache downloads but ensure the correct version +## +setup_kubectl () { + echo "setup kubectl ${KUBE_VERSION}" + if ! 
[ -f "bin/kubectl-${KUBE_VERSION}" ]; then + curl -Lo "bin/kubectl-${KUBE_VERSION}" "https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl" + chmod +x "bin/kubectl-${KUBE_VERSION}" + fi + cp "bin/kubectl-${KUBE_VERSION}" bin/kubectl +} + +setup_kind () { + echo "setup kind ${KIND_VERSION}" + if ! [ -f "bin/kind-${KIND_VERSION}" ]; then + curl -Lo "bin/kind-${KIND_VERSION}" "https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64" + chmod +x "bin/kind-${KIND_VERSION}" + fi + cp "bin/kind-${KIND_VERSION}" bin/kind +} + +setup_helm () { + echo "setup helm ${HELM_VERSION}" + if ! [ -f "bin/helm-${HELM_VERSION}" ]; then + curl -Lo "bin/helm-${HELM_VERSION}.tar.gz" "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz" + tar -xf "bin/helm-${HELM_VERSION}.tar.gz" --directory bin --strip-components 1 linux-amd64/helm + rm "bin/helm-${HELM_VERSION}.tar.gz" + mv bin/helm "bin/helm-${HELM_VERSION}" + fi + cp bin/helm-${HELM_VERSION} bin/helm +} + +setup_kubeval () { + echo "setup kubeval ${KUBEVAL_VERSION}" + if ! [ -f "bin/kubeval-${KUBEVAL_VERSION}" ]; then + curl -Lo "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" "https://github.com/instrumenta/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz" + tar -xf "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" --directory bin + rm "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" + mv bin/kubeval "bin/kubeval-${KUBEVAL_VERSION}" + fi + cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval +} + +setup_git_crypt () { + GIT_CRYPT_VERSION=0.5.0 + GIT_CRYPT_VERSION_SHA=46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b + echo "setup git-crypt ${GIT_CRYPT_VERSION}" + if ! [ -f "bin/git-crypt-${GIT_CRYPT_VERSION}" ]; then + curl -Lo "bin/git-crypt-${GIT_CRYPT_VERSION}" https://github.com/minrk/git-crypt-bin/releases/download/${GIT_CRYPT_VERSION}/git-crypt + chmod +x "bin/git-crypt-${GIT_CRYPT_VERSION}" + echo "${GIT_CRYPT_VERSION_SHA} bin/git-crypt-${GIT_CRYPT_VERSION}" | shasum -a 256 -c - + fi + cp bin/git-crypt-${GIT_CRYPT_VERSION} bin/git-crypt +} + +if [ "$1" = "ci" ]; then + export KIND_CLUSTER=jh-ci-${KUBE_VERSION} + export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} +else + setup_kubectl + setup_kind + setup_helm + setup_kubeval + + export KIND_CLUSTER=dev + export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} +fi diff --git a/ci/docker-fixes.sh b/ci/docker-fixes.sh deleted file mode 100755 index 8523cb9625..0000000000 --- a/ci/docker-fixes.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -ex - -# https://github.com/moby/moby/issues/39120 -sudo cat /etc/docker/daemon.json -echo '{"mtu": 1460}' | sudo dd of=/etc/docker/daemon.json -sudo systemctl restart docker -docker ps -a diff --git a/ci/install-kind.sh b/ci/install-kind.sh deleted file mode 100755 index 94c1ca3c99..0000000000 --- a/ci/install-kind.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -set -ex - -mkdir -p bin - -# nsenter is included on xenial - -# install socat (required by helm) -sudo apt-get update && sudo apt-get install -y socat - -# install kubectl, kind -# based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube -if ! [ -f "bin/kubectl" ]; then - echo "installing kubectl" - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl - chmod +x kubectl - mv kubectl bin/ -fi - -if ! 
[ -f "bin/kind" ]; then - echo "installing kind" - curl -Lo kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64 - chmod +x kind - mv kind bin/ -fi - -echo "installing kubeval" -if ! [ -f bin/kubeval-${KUBEVAL_VERSION} ]; then - curl -sSLo bin/kubeval-${KUBEVAL_VERSION}.tar.gz https://github.com/garethr/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz - tar --extract --file bin/kubeval-${KUBEVAL_VERSION}.tar.gz --directory bin - rm bin/kubeval-${KUBEVAL_VERSION}.tar.gz - mv bin/kubeval bin/kubeval-${KUBEVAL_VERSION} -fi -cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval - -echo "starting cluster with kind" -$PWD/bin/kind create cluster --image kindest/node:v${KUBE_VERSION} -export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" - -kubectl get nodes - -echo "installing helm" -curl -ssL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ - | tar -xz -C bin --strip-components 1 linux-amd64/helm -chmod +x bin/helm - -kubectl --namespace kube-system create sa tiller -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller - - -echo "waiting for tiller" -kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy - -echo "installing git-crypt" -curl -L https://github.com/minrk/git-crypt-bin/releases/download/0.5.0/git-crypt > bin/git-crypt -echo "46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b bin/git-crypt" | shasum -a 256 -c - -chmod +x bin/git-crypt diff --git a/ci/install-minikube.sh b/ci/install-minikube.sh deleted file mode 100755 index 0fc3e9220b..0000000000 --- a/ci/install-minikube.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -set -ex - -mkdir -p bin - -# nsenter is included on xenial - -# install socat (required by helm) -sudo apt-get update && sudo apt-get install -y socat - -# install kubectl, minikube -# based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube -echo "installing kubectl" -curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl -chmod +x kubectl -mv kubectl bin/ - -echo "installing minikube" -curl -Lo minikube https://storage.googleapis.com/minikube/releases/v${MINIKUBE_VERSION}/minikube-linux-amd64 -chmod +x minikube -mv minikube bin/ -# Reduce CI logs clutter -bin/minikube config set WantKubectlDownloadMsg false -bin/minikube config set WantReportErrorPrompt false - -# FIXME: Workaround missing crictl on K8s 1.11 only -# - Issue: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1123 -# - CI fail: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/485093909 -if [ ! -z "${CRICTL_VERSION}" ]; then - echo "installing crictl" - if ! [ -f bin/crictl-${CRICTL_VERSION} ]; then - curl -sSLo bin/crictl-${CRICTL_VERSION}.tar.gz https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz - tar --extract --file bin/crictl-${CRICTL_VERSION}.tar.gz --directory bin - rm bin/crictl-${CRICTL_VERSION}.tar.gz - mv bin/crictl bin/crictl-${CRICTL_VERSION} - fi - cp bin/crictl-${CRICTL_VERSION} bin/crictl - # minikube is run with sudo so the modified PATH is lost - sudo ln -s "${PWD}/bin/crictl-${CRICTL_VERSION}" /usr/bin/crictl -fi - - -echo "installing kubeval" -if ! 
[ -f bin/kubeval-${KUBEVAL_VERSION} ]; then - curl -sSLo bin/kubeval-${KUBEVAL_VERSION}.tar.gz https://github.com/garethr/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz - tar --extract --file bin/kubeval-${KUBEVAL_VERSION}.tar.gz --directory bin - rm bin/kubeval-${KUBEVAL_VERSION}.tar.gz - mv bin/kubeval bin/kubeval-${KUBEVAL_VERSION} -fi -cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval - -echo "starting minikube with RBAC" -sudo CHANGE_MINIKUBE_NONE_USER=true $PWD/bin/minikube start $MINIKUBE_ARGS -minikube update-context - -# If using CNI the node will not be NotReady until a CNI config exists -if [ "$INSTALL_CALICO" = "1" ]; then - echo "installing calico" - # https://github.com/projectcalico/calico/issues/1456#issuecomment-422957446 - kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/etcd.yaml - kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/rbac.yaml - curl -sf https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/calico.yaml -O - CALICO_ETCD_IP=$(kubectl get service --namespace=kube-system calico-etcd -o jsonpath='{.spec.clusterIP}') - sed -i -e "s/10\.96\.232\.136/$CALICO_ETCD_IP/" calico.yaml - kubectl apply -f calico.yaml - - echo "waiting for calico" - JSONPATH='{.status.numberReady}' - until [ "$(kubectl get daemonsets calico-node -n kube-system -o jsonpath="$JSONPATH")" = "1" ]; do - sleep 1 - done -fi - -echo "waiting for kubernetes" -JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' -until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do - sleep 1 -done -kubectl get nodes - -echo "installing helm" -curl -ssL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ - | tar -xz -C bin --strip-components 1 linux-amd64/helm -chmod +x bin/helm - -kubectl --namespace kube-system create sa tiller -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller - - -echo "waiting for tiller" -kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy - -echo "installing git-crypt" -curl -L https://github.com/minrk/git-crypt-bin/releases/download/0.5.0/git-crypt > bin/git-crypt -echo "46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b bin/git-crypt" | shasum -a 256 -c - -chmod +x bin/git-crypt diff --git a/ci/kind-1.12-default.env b/ci/kind-1.12-default.env deleted file mode 100644 index 41f9075004..0000000000 --- a/ci/kind-1.12-default.env +++ /dev/null @@ -1,11 +0,0 @@ -export KUBE_VERSION=1.12.10 -export KIND_VERSION=0.5.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.13-default.env b/ci/kind-1.13-default.env deleted file mode 100644 index 506fa99ad7..0000000000 --- a/ci/kind-1.13-default.env +++ /dev/null @@ -1,11 +0,0 @@ -export KUBE_VERSION=1.13.10 -export KIND_VERSION=0.5.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env deleted file mode 100644 index 32ce01a12c..0000000000 
--- a/ci/kind-1.14-default.env +++ /dev/null @@ -1,11 +0,0 @@ -export KUBE_VERSION=1.14.6 -export KIND_VERSION=0.5.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.15-default.env b/ci/kind-1.15-default.env deleted file mode 100644 index e9a031e390..0000000000 --- a/ci/kind-1.15-default.env +++ /dev/null @@ -1,11 +0,0 @@ -export KUBE_VERSION=1.15.3 -export KIND_VERSION=0.5.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-config.yaml b/ci/kind-config.yaml new file mode 100644 index 0000000000..e71c9ac52a --- /dev/null +++ b/ci/kind-config.yaml @@ -0,0 +1,11 @@ +## kind create cluster --config kind-config.yaml +## +## ref: https://github.com/kubernetes-sigs/kind/blob/master/site/content/docs/user/kind-example-config.yaml +## ref: https://godoc.org/sigs.k8s.io/kind/pkg/apis/config/v1alpha3#Cluster +## +kind: Cluster +apiVersion: kind.sigs.k8s.io/v1alpha3 +networking: + disableDefaultCNI: true +nodes: +- role: control-plane diff --git a/ci/kind-load-docker-images.py b/ci/kind-load-docker-images.py new file mode 100755 index 0000000000..58996177bd --- /dev/null +++ b/ci/kind-load-docker-images.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +""" +Run `kind load docker-image ` on all the docker images within +values.yaml that is available locally on the host as first verified with `docker +images --quiet `. If we could capture this directly from chartpress +build output it would be quicker. 
+""" + +import sys +import argparse +import pipes +import subprocess + +import yaml + + +def check_output(cmd, **kwargs): + """Run a subcommand and exit if it fails""" + try: + return subprocess.check_output(cmd, **kwargs) + except subprocess.CalledProcessError as e: + print( + "`{}` exited with status {}".format( + " ".join(map(pipes.quote, cmd)), e.returncode + ), + file=sys.stderr, + ) + sys.exit(e.returncode) + + +def get_element_from_path(path, dictionary): + keys = path.split(".") + e = dictionary + for key in keys: + e = e[key] + return e + + +def extract_images_from_values(chartpress_file, values_file): + """Returns a list of image:tag strings given a values.yaml file.""" + + with open(chartpress_file) as f: + chartpress = yaml.full_load(f) + + with open(values_file) as f: + values = yaml.full_load(f) + + image_paths = [] + for chart in chartpress["charts"]: + for k, v in chart["images"].items(): + image_paths.append(v["valuesPath"]) + + images = [] + for image_path in image_paths: + image = get_element_from_path(image_path, values) + images.append(image["name"] + ":" + image["tag"]) + + return images + + +def kind_load_docker_images(kind_cluster, images): + """Calls `kind load docker-image ` on provided images available locally.""" + + for image in images: + if not check_output(["docker", "images", "--quiet", image]): + continue + + check_output(["kind", "load", "docker-image", "--name", kind_cluster, image]) + print("### Loaded %s" % image) + + +if __name__ == "__main__": + argparser = argparse.ArgumentParser() + argparser.add_argument( + "--kind-cluster", + default="kind", + help="Specify a kind cluster to load the docker images into.", + ) + argparser.add_argument( + "--values", + default="jupyterhub/values.yaml", + help="Specify a values.yaml file to look in.", + ) + argparser.add_argument( + "--chartpress", + default="chartpress.yaml", + help="Specify a chartpress.yaml with information about where to look for images.", + ) + args = argparser.parse_args() + + images = extract_images_from_values( + chartpress_file=args.chartpress, values_file=args.values + ) + kind_load_docker_images(args.kind_cluster, images) diff --git a/ci/minikube-1.11-default.env b/ci/minikube-1.11-default.env deleted file mode 100644 index 69be176f95..0000000000 --- a/ci/minikube-1.11-default.env +++ /dev/null @@ -1,15 +0,0 @@ -export KUBE_VERSION=1.11.7 -export MINIKUBE_VERSION=0.33.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export MINIKUBE_ARGS="--vm-driver=none --kubernetes-version=v${KUBE_VERSION}" -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 - -# FIXME: Issue 1123 -export CRICTL_VERSION=1.11.1 diff --git a/ci/minikube-1.12-netpol.env b/ci/minikube-1.12-netpol.env deleted file mode 100644 index 65863b74f2..0000000000 --- a/ci/minikube-1.12-netpol.env +++ /dev/null @@ -1,12 +0,0 @@ -export KUBE_VERSION=1.12.5 -export MINIKUBE_VERSION=0.33.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export MINIKUBE_ARGS="--vm-driver=none --kubernetes-version=v${KUBE_VERSION} --network-plugin cni --extra-config=kubelet.network-plugin=cni" -export Z2JH_HELM_ARGS="-f minikube-config.yaml -f minikube-netpol.yaml" -export DISABLE_TEST_NETPOL=0 -export INSTALL_CALICO=1 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/minikube-1.13-default.env b/ci/minikube-1.13-default.env deleted file mode 100644 index 84b51dab60..0000000000 --- 
a/ci/minikube-1.13-default.env +++ /dev/null @@ -1,12 +0,0 @@ -export KUBE_VERSION=1.13.2 -export MINIKUBE_VERSION=0.33.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export MINIKUBE_ARGS="--vm-driver=none --kubernetes-version=v${KUBE_VERSION}" -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=1 diff --git a/ci/publish-chart.sh b/ci/publish similarity index 91% rename from ci/publish-chart.sh rename to ci/publish index daa89417fb..01e5bbb0e9 100755 --- a/ci/publish-chart.sh +++ b/ci/publish @@ -4,7 +4,7 @@ set -eu # Decrypt a private SSH key having its public key registered on GitHub. It will # be used to establish an identity with rights to push to the repo hosting our # Helm charts: https://github.com/jupyterhub/helm-chart -openssl aes-256-cbc -K $encrypted_c6b45058ffe8_key -iv $encrypted_c6b45058ffe8_iv -in ci/id_rsa.enc -out ci/id_rsa -d +openssl aes-256-cbc -K $encrypted_c6b45058ffe8_key -iv $encrypted_c6b45058ffe8_iv -in ci/publish-id_rsa.enc -out ci/id_rsa -d chmod 0400 ci/id_rsa docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" @@ -20,4 +20,4 @@ chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart # Let us log the changes chartpress did, it should include replacements for # fields in values.yaml, such as what tag for various images we are using. -git diff +git --no-pager diff diff --git a/ci/id_rsa.enc b/ci/publish-id_rsa.enc similarity index 100% rename from ci/id_rsa.enc rename to ci/publish-id_rsa.enc diff --git a/ci/start-k8s b/ci/start-k8s new file mode 100755 index 0000000000..ce87244736 --- /dev/null +++ b/ci/start-k8s @@ -0,0 +1,100 @@ +#!/bin/bash +set -eu + +## NOTE: This script assumes we have installed kind, but the common script doesn't +## +if [ "${KIND_CLUSTER:-}" == "" ]; then + echo "Run \". ./dev init\" first!" + exit 1 +elif [ "${KIND_CLUSTER:-}" != "dev" ]; then + if [ "${KUBECONFIG:-}" != "$(kind get kubeconfig-path --name="jh-ci-${KUBE_VERSION:-}")" ]; then + echo "Assertion error: KUBECONFIG out of sync with KUBE_VERSION" + echo "KUBECONFIG=${KUBECONFIG:-}" + echo "KUBE_VERSION=${KUBE_VERSION:-}" + echo "Run \". ./ci/common\" to update your KUBECONFIG environment variable based on your KUBE_VERSION variable." + exit 1 + elif [ "${KIND_CLUSTER:-}" != "jh-ci-${KUBE_VERSION:-}" ]; then + echo "Assertion error: KIND_CLUSTER out of sync with KUBE_VERSION" + echo "KIND_CLUSTER=${KIND_CLUSTER:-}" + echo "KUBE_VERSION=${KUBE_VERSION:-}" + echo "Run \". ./ci/common\" to update your KIND_CLUSTER environment variable based on your KUBE_VERSION variable." + exit 1 + fi +fi + +# If the kind k8s cluster for this k8s version is already running, restart it +if kind get clusters | grep --word-regexp ${KIND_CLUSTER}; then + echo "deleting existing kind k8s cluster: ${KIND_CLUSTER}" + kind delete cluster --name=${KIND_CLUSTER} +fi + +echo "starting kind k8s cluster: ${KIND_CLUSTER}" +kind create cluster --name=${KIND_CLUSTER} --image="kindest/node:v${KUBE_VERSION}" --config ci/kind-config.yaml +kubectl config set-context --current --namespace jh-ci +kubectl get nodes + +# To test network policies, we need a custom CNI like Calico. We have disabled +# the default CNI through kind-config.yaml and will need to manually install a +# CNI for the nodes to become Ready. 
+
+echo "installing a custom CNI: Calico (async, in cluster)"
+# Setup daemonset/calico-etcd, a prerequisite for calico-node
+kubectl apply -f https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/etcd.yaml
+# NOTE: A toleration to schedule on a node that isn't ready is missing, but
+#       this pod will be part of making sure the node can become ready.
+#
+#       toleration:
+#       - key: node.kubernetes.io/not-ready
+#         effect: NoSchedule
+kubectl patch -n kube-system daemonset/calico-etcd --type='json' \
+    -p='[{"op":"add", "path":"/spec/template/spec/tolerations/-", "value":{"key":"node.kubernetes.io/not-ready", "effect":"NoSchedule"}}]'
+
+# Setup daemonset/calico-node, which will allow nodes to enter a ready state
+curl -sSo ci/daemonset-calico-node.yaml https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/calico.yaml
+# NOTE: Connection details for daemonset/calico-etcd are missing, so we need
+#       to add them manually.
+CALICO_ETCD_IP=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.clusterIP}')
+CALICO_ETCD_PORT=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.ports[0].port}')
+sed -i -e "s/<ETCD_IP>:<ETCD_PORT>/$CALICO_ETCD_IP:$CALICO_ETCD_PORT/" ci/daemonset-calico-node.yaml
+kubectl apply -f ci/daemonset-calico-node.yaml
+# NOTE: The daemonset/calico-node pods' main container fails to start up
+#       without an additional environment variable configured to disable a
+#       check that we would otherwise fail.
+#
+#       env:
+#       - name: FELIX_IGNORELOOSERPF
+#         value: "true"
+kubectl patch -n kube-system daemonset/calico-node --type='json' \
+    -p='[{"op":"add", "path":"/spec/template/spec/containers/0/env/-", "value":{"name":"FELIX_IGNORELOOSERPF", "value":"true"}}]'
+
+echo "waiting for kubernetes nodes (in cluster)"
+# NOTE: kubectl wait has a bug relating to the --all flag, at least in 1.13.
+#       Due to this, we wait only for the kind-control-plane node, which
+#       currently is the only node we start with kind but could be configured
+#       in kind-config.yaml.
+#
+#       ref: https://github.com/kubernetes/kubernetes/pull/71746
+kubectl wait node/${KIND_CLUSTER}-control-plane --for condition=ready --timeout 2m || {
+    r=$?
+    echo "kubernetes nodes never became ready"
+    kubectl describe nodes || true
+    kubectl describe -n kube-system daemonset/calico-etcd || true
+    kubectl logs -n kube-system daemonset/calico-etcd || true
+    kubectl describe -n kube-system daemonset/calico-node || true
+    kubectl logs -n kube-system daemonset/calico-node || true
+    exit $r
+}
+
+echo "installing tiller (async, in cluster)"
+kubectl create serviceaccount tiller -n kube-system
+kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
+helm init --service-account tiller
+
+echo "waiting for tiller (in cluster)"
+kubectl rollout status -n kube-system deployment/tiller-deploy --timeout 1m || {
+    r=$?

+ echo "tiller never became ready" + kubectl describe nodes || true + kubectl describe -n kube-system deployment/tiller || true + kubectl logs -n kube-system deployment/tiller || true + exit $r +} diff --git a/ci/test b/ci/test new file mode 100755 index 0000000000..d7d3993d49 --- /dev/null +++ b/ci/test @@ -0,0 +1,30 @@ +#!/bin/bash +set -eu + +display_logs() { + echo "***** node *****" + kubectl describe node + echo "***** pods *****" + kubectl get pods + echo "***** events *****" + kubectl get events + echo "***** hub *****" + kubectl logs deploy/hub + echo "***** proxy *****" + kubectl logs deploy/proxy +} + +echo "running tests from outside the cluster:" +echo "- kubectl port-forward has enabled communication with services in the cluster" +## NOTE: -x / --exitfirst makes us avoid noise in the hub and proxy pod logs +## following a failure we are interested in debugging. +## +pytest ./tests -v --exitfirst || { + r=$? + echo "a test failed, here is relevant debugging information" + display_logs + exit $r +} + +## If tests succeeded show all pods to see if any were restarted +kubectl get pods diff --git a/ci/test.sh b/ci/test.sh deleted file mode 100755 index 7590a81396..0000000000 --- a/ci/test.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/sh - -set -eux - - -TEST_NAMESPACE=jupyterhub-test - -if [ "$RUNNER" = "kind" ]; then - export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" -else - # Is there a standard interface name? - for iface in eth0 ens4 enp0s3; do - IP=$(/sbin/ifconfig $iface | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'); - if [ -n "$IP" ]; then - echo "IP: $IP" - break - fi - done - if [ -z "$IP" ]; then - echo "Failed to get IP, current interfaces:" - /sbin/ifconfig -a - exit 2 - fi -fi - -helm install --wait --name jupyterhub-test --namespace $TEST_NAMESPACE ./jupyterhub/ $Z2JH_HELM_ARGS - -if [ "$RUNNER" = "kind" ]; then - kubectl port-forward -n $TEST_NAMESPACE svc/proxy-public 8080:80 & - TEST_URL=http://127.0.0.1:8080 - export HUB_API_URL=http://127.0.0.1:8080/hub/api -else - TEST_URL=http://$IP:31212 -fi - -echo "waiting for servers to become responsive" -until curl --fail -s $TEST_URL/hub/api; do - kubectl --namespace=$TEST_NAMESPACE describe pod - sleep 10 -done - -echo "getting jupyterhub version" -curl -s $TEST_URL/hub/api | grep version - -echo "running tests" - -display_logs() { - echo "***** minikube *****" - minikube logs - echo "***** node *****" - kubectl describe node - echo "***** pods *****" - kubectl --namespace $TEST_NAMESPACE get pods - echo "***** events *****" - kubectl --namespace $TEST_NAMESPACE get events - echo "***** hub *****" - kubectl --namespace $TEST_NAMESPACE logs deploy/hub - echo "***** proxy *****" - kubectl --namespace $TEST_NAMESPACE logs deploy/proxy -} - -# Run this first to ensure the hub can talk to the proxy -# (it will automatically retry) -pytest tests/test_hub_is_ready.py - -# Now sleep, and retry again, in case a race condition meant the two were -# momentarily able to communicate whilst already shutting down -sleep 1m -pytest tests/test_hub_is_ready.py - -# Hopefully this works now! If tests still failing output logs -pytest || { - r=$? 
-    echo "tests failed"
-    display_logs
-    exit $r
-}
-
-# If tests succeeded show all pods to see if any were restarted
-kubectl --namespace $TEST_NAMESPACE get pods
diff --git a/ci/travis-docker-fix b/ci/travis-docker-fix
new file mode 100755
index 0000000000..e7f04525f1
--- /dev/null
+++ b/ci/travis-docker-fix
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -eu
+
+# This is a workaround to an issue caused by the existence of a docker registry
+# mirror in our CI environment. Without this fix, which removes the mirror,
+# chartpress fails to realize the existence of already built images and rebuilds
+# them.
+#
+# ref: https://github.com/moby/moby/issues/39120
+sudo cat /etc/docker/daemon.json
+echo '{"mtu": 1460}' | sudo dd of=/etc/docker/daemon.json
+sudo systemctl restart docker
+docker ps -a
diff --git a/ci/travis-script.sh b/ci/travis-script.sh
deleted file mode 100755
index 408d0a06c5..0000000000
--- a/ci/travis-script.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-set -eux
-
-python3 tools/templates/lint-and-validate.py
-# render & publish chart
-if [[
-  "$TRAVIS_BRANCH" == "master" &&
-  "$TRAVIS_PULL_REQUEST" == "false" &&
-  "$RUN_PUBLISH_SCRIPT" == "1"
-]]; then
-  ./ci/publish-chart.sh
-else
-  chartpress --commit-range ${TRAVIS_COMMIT_RANGE}
-fi
-git diff
-
-./ci/test.sh
diff --git a/ci/upgrade b/ci/upgrade
new file mode 100755
index 0000000000..30f2180ce7
--- /dev/null
+++ b/ci/upgrade
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -eu
+
+## set TRAVIS_COMMIT_RANGE if it is unset on a local CI run
+##
+## NOTE: Use an open-ended range from the upstream or origin master branch to
+##       the current state, including unstaged changes.
+##
+if [ -z ${TRAVIS_COMMIT_RANGE:-} ]; then
+    if git remote -v | grep --word-regexp upstream; then
+        GIT_REMOTE=upstream/
+    elif git remote -v | grep --word-regexp origin; then
+        GIT_REMOTE=origin/
+    fi
+    export TRAVIS_COMMIT_RANGE=${GIT_REMOTE:-}master..
+fi
+
+echo "build images and update the default values.yaml to reference them"
+chartpress --commit-range ${TRAVIS_COMMIT_RANGE}
+git --no-pager diff
+
+echo "load the images into the kind cluster"
+python3 ci/kind-load-docker-images.py --kind-cluster $KIND_CLUSTER --values ./jupyterhub/values.yaml
+
+echo "install our deployment"
+helm upgrade --install jh-ci --wait --namespace jh-ci ./jupyterhub \
+    --values dev-config.yaml \
+    --values dev-config-netpol.yaml
+
+echo "waiting for hub and proxy to become responsive"
+kubectl rollout status deployment/proxy --timeout 1m
+kubectl rollout status deployment/hub --timeout 1m
+
+echo "couple a localhost port with svc/proxy-public to access the JupyterHub API"
+kubectl port-forward svc/proxy-public 8080:80 > /dev/null 2>&1 &
diff --git a/ci/vagrant-run-kind.sh b/ci/vagrant-run-kind.sh
deleted file mode 100755
index 06f56d19af..0000000000
--- a/ci/vagrant-run-kind.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# Run this inside vagrant to test the travis scripts
-
-set -eux
-export SCENARIO=1.15-default
-export RUNNER=kind
-export TRAVIS_BRANCH=master
-export TRAVIS_PULL_REQUEST=true
-export TRAVIS_COMMIT_RANGE=`git rev-parse --short origin/master`..`git rev-parse --short HEAD`
-
-pip3 install --no-cache-dir -r dev-requirements.txt
-. 
./ci/kind-${SCENARIO}.env -./ci/install-kind.sh -./ci/travis-script.sh diff --git a/ci/vagrant-run.sh b/ci/vagrant-run.sh deleted file mode 100755 index fed4d2f91a..0000000000 --- a/ci/vagrant-run.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -# Run this inside vagrant to test the travis scripts - -set -eux -export SCENARIO=1.12-netpol -export TRAVIS_BRANCH=master -export TRAVIS_PULL_REQUEST=true -export TRAVIS_COMMIT_RANGE=`git rev-parse --short origin/master`..`git rev-parse --short HEAD` - -pip3 install --no-cache-dir -r dev-requirements.txt -. ./ci/minikube-${SCENARIO}.env -./ci/install-minikube.sh -./ci/travis-script.sh diff --git a/ci/xenial-setup.sh b/ci/xenial-setup.sh deleted file mode 100755 index a28ff44692..0000000000 --- a/ci/xenial-setup.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh - -set -eux - -apt-get -q update -apt-get -q install -y python3-pip - -# https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#install-docker-ce -#apt-get -q install -y linux-image-extra-$(uname -r) linux-image-extra-virtual - -DOCKER_DEB=docker-ce_18.06.0~ce~3-0~ubuntu_amd64.deb -curl -O https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/$DOCKER_DEB -# dpkg won't install dependencies -dpkg -i $DOCKER_DEB || apt-get install -f -y -docker info -usermod -G docker vagrant - -install -o vagrant -g vagrant -d /home/vagrant/bin - -# Workaround Minikube DNS problems -# https://github.com/kubernetes/minikube/issues/2027#issuecomment-338221646 -cat << EOF > /etc/resolv.conf -nameserver 8.8.4.4 -nameserver 8.8.8.8 -EOF -sed -i -re "s/^(127.0.0.1\\s.+)/\\1 `hostname`/" /etc/hosts - -# chartpress requires Python 3.6+, Xenial has 3.5 -# http://ubuntuhandbook.org/index.php/2017/07/install-python-3-6-1-in-ubuntu-16-04-lts/ -add-apt-repository -y ppa:jonathonf/python-3.6 -apt-get update -apt-get install -y python3.6 -update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 1 diff --git a/dev b/dev new file mode 100755 index 0000000000..a6ac040bc0 --- /dev/null +++ b/dev @@ -0,0 +1,38 @@ +#!/bin/bash + +## dev is a script to help us get started performing typical task during local +## development without needing to learn everything at once +## +## - init +## - start-k8s +## - upgrade +## - lint-and-validate +## - test +## + +## if the script is sourced +if [ "${BASH_SOURCE[0]}" != "${0}" ]; then + if ! [ "$1" = "init" ]; then + echo "Only source the init command, run your command without a leading dot!" + else + if [ "$1" = "init" ]; then + . ./ci/common + pip3 install -r dev-requirements.txt + fi + fi +## else, the script isn't sourced +else + if [ "$1" = "init" ]; then + echo "The init command needs to be sourced, run it with \". 
./dev init\""
+    else
+        if [ "$1" = "start-k8s" ]; then
+            ./ci/start-k8s
+        elif [ "$1" = "upgrade" ]; then
+            ./ci/upgrade
+        elif [ "$1" = "lint-and-validate" ]; then
+            python3 tools/templates/lint-and-validate.py --kubernetes-versions 1.13.0
+        elif [ "$1" = "test" ]; then
+            ./ci/upgrade && ./ci/test
+        fi
+    fi
+fi
diff --git a/minikube-netpol.yaml b/dev-config-netpol.yaml
similarity index 100%
rename from minikube-netpol.yaml
rename to dev-config-netpol.yaml
diff --git a/minikube-config.yaml b/dev-config.yaml
similarity index 100%
rename from minikube-config.yaml
rename to dev-config.yaml
diff --git a/dev-requirements.txt b/dev-requirements.txt
index c444f67c04..740c5fe01c 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,10 +1,22 @@
-# chartpress is important for local development, CI and CD
-# - builds images and can push them also (--push)
-# - updates image names and tags in values.yaml
-# - can publish the built Helm chart (--publish)
+## chartpress is important for local development, CI and CD
+## - builds images and can push them also (--push)
+## - updates image names and tags in values.yaml
+## - can publish the built Helm chart (--publish)
+##
+## chartpress is used by
+## - test
+## - publish
+##
+## ref: https://github.com/jupyterhub/chartpress
+##
 chartpress==0.3.1
 
-# yamllint and pytest are important for local development, CI
+## pytest runs tests that require requests; pytest is run from the test
+## script
+##
 pytest>=3.7.1
 requests
-yamllint>=1.1.1
+
+## yamllint is used by the tools/templates/lint-and-validate.py script
+##
+yamllint>=1.17.0
diff --git a/images/hub/Dockerfile b/images/hub/Dockerfile
index 2f8e483a99..58a545222f 100644
--- a/images/hub/Dockerfile
+++ b/images/hub/Dockerfile
@@ -1,5 +1,7 @@
 FROM ubuntu:18.04
 
+## NOTE: This is a default and can be overridden by chartpress using the
+##       chartpress.yaml configuration
 ARG JUPYTERHUB_VERSION=1.0.*
 
 RUN apt-get update && \
diff --git a/images/singleuser-sample/Dockerfile b/images/singleuser-sample/Dockerfile
index 6131c9b0e5..332cd40aa1 100644
--- a/images/singleuser-sample/Dockerfile
+++ b/images/singleuser-sample/Dockerfile
@@ -9,7 +9,7 @@ FROM jupyter/base-notebook:8ccdfc1da8d5
 
 # Example install of git and nbgitpuller.
 # NOTE: git is already available in the jupyter/minimal-notebook image.
 
USER root -RUN apt-get update && apt-get install --yes \ +RUN apt-get update && apt-get install --yes --no-install-recommends \ git \ && rm -rf /var/lib/apt/lists/* USER $NB_USER diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index 9eb00fa384..e9dafbe531 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -1,6 +1,6 @@ name: jupyterhub -version: 0.9-dev -appVersion: 1.0.0 +version: 0.9-0924e65 +appVersion: 1.0.1dev description: Multi-user Jupyter installation home: https://z2jh.jupyter.org sources: diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml index 695d96014e..abcb8e32ec 100644 --- a/jupyterhub/values.yaml +++ b/jupyterhub/values.yaml @@ -48,7 +48,7 @@ hub: extraVolumeMounts: [] image: name: jupyterhub/k8s-hub - tag: generated-by-chartpress + tag: 'generated-by-chartpress' # pullSecrets: # - secretName resources: @@ -203,7 +203,7 @@ singleuser: networkTools: image: name: jupyterhub/k8s-network-tools - tag: generated-by-chartpress + tag: 'generated-by-chartpress' cloudMetadata: enabled: false ip: 169.254.169.254 @@ -245,7 +245,7 @@ singleuser: storageAccessModes: [ReadWriteOnce] image: name: jupyterhub/k8s-singleuser-sample - tag: generated-by-chartpress + tag: 'generated-by-chartpress' pullPolicy: IfNotPresent # pullSecrets: # - secretName @@ -316,7 +316,7 @@ prePuller: enabled: true image: name: jupyterhub/k8s-image-awaiter - tag: generated-by-chartpress + tag: 'generated-by-chartpress' continuous: enabled: true extraImages: {} diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000..ffc9e22431 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,3 @@ +# What is this folder about? + +We have setup tests for [pytest](https://docs.pytest.org/en/latest/) that will run in our CI/CD pipeline on Travis. These test assumes it is able to speak directly to a running hub within a Kubernetes cluster etc. In practice, they assume you have been using `dev` script to set it all up. diff --git a/tests/conftest.py b/tests/conftest.py index e74f169ed3..e7e9abb0b7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,14 +1,18 @@ +## conftest.py has a special meaning to pytest +## ref: https://docs.pytest.org/en/latest/writing_plugins.html#conftest-py-plugins +## import os import requests -import pytest import uuid + +import pytest import yaml @pytest.fixture(scope='module') def request_data(): basedir = os.path.dirname(os.path.dirname(__file__)) - with open(os.path.join(basedir, 'minikube-config.yaml')) as f: + with open(os.path.join(basedir, 'dev-config.yaml')) as f: y = yaml.safe_load(f) token = y['hub']['services']['test']['apiToken'] return { diff --git a/tests/test_hub_is_ready.py b/tests/test_hub_is_ready.py deleted file mode 100644 index 0a2c5e7a8c..0000000000 --- a/tests/test_hub_is_ready.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -import time - - -def test_hub_can_talk_to_proxy(api_request, request_data): - endtime = time.time() + request_data['test_timeout'] - while time.time() < endtime: - try: - r = api_request.get('/proxy') - if r.status_code == 200: - break - print(r.json()) - except requests.RequestException as e: - print(e) - time.sleep(1) - assert r.status_code == 200, 'Failed to get /proxy' diff --git a/tests/test_spawn.py b/tests/test_spawn.py index cb78ec3aa7..bf49542c98 100644 --- a/tests/test_spawn.py +++ b/tests/test_spawn.py @@ -6,6 +6,12 @@ import requests import yaml +## DEV NOTES: +## A lot of logs are currently in the code for debugging purposes. 
+## +## ref: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/589410196 +## + # Makes heavy use of JupyterHub's API: # http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/docs/rest-api.yml @@ -17,12 +23,17 @@ chart = yaml.safe_load(f) jupyterhub_version = chart['appVersion'] + def test_api(api_request): print("asking for the hub's version") r = api_request.get('') assert r.status_code == 200 assert r.json().get("version", "version-missing") == jupyterhub_version + """kubectl logs deploy/hub - on a successful run + [I 2019-09-25 12:03:12.051 JupyterHub log:174] 200 GET /hub/api (test@127.0.0.1) 9.57ms + """ + def test_api_info(api_request): print("asking for the hub information") @@ -31,24 +42,71 @@ def test_api_info(api_request): result = r.json() assert result['spawner']['class'] == 'kubespawner.spawner.KubeSpawner' - -def test_api_create_user(api_request, jupyter_user): - print("creating the testuser") - # Already created by the jupyter_user fixture + """kubectl logs deploy/hub - on a successful run + [I 2019-09-25 12:03:12.086 JupyterHub log:174] 200 GET /hub/api/info (test@127.0.0.1) 10.21ms + """ + + +def test_hub_api_create_user_and_get_information_about_user(api_request, jupyter_user): + # NOTE: The jupyter user is created and commited to the hub database through + # the jupyter_user pytest fixture declared in conftest.py. Due to + # this, this first test is actually testing both the fixture to create + # the user, and the ability to get information from the hub about the + # user. + # + # Also note that the fixture will automatically clean up the + # user from the hub's database when the function exit. + print("create a user, and get information about the user") r = api_request.get('/users/' + jupyter_user) assert r.status_code == 200 assert r.json()['name'] == jupyter_user + """kubectl logs deploy/hub - on a successful run + [I 2019-09-25 12:03:12.126 JupyterHub log:174] 201 POST /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 20.74ms + [I 2019-09-25 12:03:12.153 JupyterHub log:174] 200 GET /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 11.91ms + [D 2019-09-25 12:03:12.180 JupyterHub user:240] Creating for testuser-7c70eb90-035b-4d9f-92a5-482e441e307d: + [I 2019-09-25 12:03:12.204 JupyterHub reflector:199] watching for pods with label selector='component=singleuser-server' in namespace jh-ci + [D 2019-09-25 12:03:12.205 JupyterHub reflector:202] Connecting pods watcher + [I 2019-09-25 12:03:12.229 JupyterHub reflector:199] watching for events with field selector='involvedObject.kind=Pod' in namespace jh-ci + [D 2019-09-25 12:03:12.229 JupyterHub reflector:202] Connecting events watcher + [I 2019-09-25 12:03:12.269 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 98.85ms + """ + -def test_api_list_users(api_request, jupyter_user): - print("asking for information") +def test_hub_api_list_users(api_request, jupyter_user): + print("create a test user, get information about all users, and find the test user") r = api_request.get('/users') assert r.status_code == 200 assert any(u['name'] == jupyter_user for u in r.json()) + """kubectl logs deploy/hub - on a successful run + [I 2019-09-25 12:03:12.303 JupyterHub log:174] 201 POST /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 15.53ms + [I 2019-09-25 12:03:12.331 JupyterHub log:174] 200 GET /hub/api/users (test@127.0.0.1) 10.83ms + [D 
2019-09-25 12:03:12.358 JupyterHub user:240] Creating for testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f: + [I 2019-09-25 12:03:12.365 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 18.44ms + """ + -def test_api_request_user_spawn(api_request, jupyter_user, request_data): - print("asking kubespawner to spawn testusers singleuser-server pod") +def test_hub_can_talk_to_proxy(api_request, request_data): + endtime = time.time() + request_data['test_timeout'] + while time.time() < endtime: + try: + r = api_request.get('/proxy') + if r.status_code == 200: + break + print(r.json()) + except requests.RequestException as e: + print(e) + time.sleep(1) + assert r.status_code == 200, 'Failed to get /proxy' + + """kubectl logs deploy/hub - on a successful run + [I 2019-09-25 12:03:12.395 JupyterHub log:174] 200 GET /hub/api/proxy (test@127.0.0.1) 13.48ms + """ + + +def test_hub_api_request_user_spawn(api_request, jupyter_user, request_data): + print("asking kubespawner to spawn a server for a test user") r = api_request.post('/users/' + jupyter_user + '/server') assert r.status_code in (201, 202) try: @@ -61,10 +119,8 @@ def test_api_request_user_spawn(api_request, jupyter_user, request_data): _delete_server(api_request, jupyter_user, request_data['test_timeout']) -@pytest.mark.skipif(os.getenv('DISABLE_TEST_NETPOL') == '1', - reason="DISABLE_TEST_NETPOL set") def test_singleuser_netpol(api_request, jupyter_user, request_data): - print("asking kubespawner to spawn a singleuser-server pod to test network policies") + print("asking kubespawner to spawn a server for a test user to test network policies") r = api_request.post('/users/' + jupyter_user + '/server') assert r.status_code in (201, 202) try: @@ -73,17 +129,17 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): print(server_model) pod_name = server_model['state']['pod_name'] - # Must match CIDR in minikube-netpol.yaml + # Must match CIDR in dev-config-netpol.yaml allowed_url = 'http://jupyter.org' blocked_url = 'http://mybinder.org' c = subprocess.run([ - 'kubectl', '--namespace=jupyterhub-test', 'exec', pod_name, '--', + 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', 'wget', '-q', '-t1', '-T5', allowed_url]) assert c.returncode == 0, "Unable to get allowed domain" c = subprocess.run([ - 'kubectl', '--namespace=jupyterhub-test', 'exec', pod_name, '--', + 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', 'wget', '-q', '-t1', '-T5', blocked_url]) assert c.returncode > 0, "Blocked domain was allowed" @@ -94,7 +150,8 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): def _wait_for_user_to_spawn(api_request, jupyter_user, timeout): endtime = time.time() + timeout while time.time() < endtime: - # FIXME: This can fail with 503! Make it robuster than this! + # NOTE: If this fails with a 503 response from the proxy, the hub pod has + # probably crashed by the tests interaction with it. r = api_request.get('/users/' + jupyter_user) r.raise_for_status() user_model = r.json() @@ -114,6 +171,9 @@ def _wait_for_user_to_spawn(api_request, jupyter_user, timeout): def _delete_server(api_request, jupyter_user, timeout): + # NOTE: If this fails with a 503 response from the proxy, the hub pod has + # probably crashed by the tests interaction with it. 
+ r = api_request.delete('/users/' + jupyter_user + '/server') assert r.status_code in (202, 204) diff --git a/tools/templates/lint-and-validate.py b/tools/templates/lint-and-validate.py index c7b209bcb2..bb65ef401f 100755 --- a/tools/templates/lint-and-validate.py +++ b/tools/templates/lint-and-validate.py @@ -8,15 +8,14 @@ pip install yamllint -- https://github.com/garethr/kubeval +- https://github.com/instrumenta/kubeval -LATEST=curl --silent "https://api.github.com/repos/garethr/kubeval/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' -wget https://github.com/garethr/kubeval/releases/download/$LATEST/kubeval-linux-amd64.tar.gz +LATEST=curl --silent "https://api.github.com/repos/instrumenta/kubeval/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' +wget https://github.com/instrumenta/kubeval/releases/download/$LATEST/kubeval-linux-amd64.tar.gz tar xf kubeval-darwin-amd64.tar.gz mv kubeval /usr/local/bin """ - import os import sys import argparse @@ -40,7 +39,7 @@ def check_call(cmd, **kwargs): ) sys.exit(e.returncode) -def lint(yamllint_config, values, kubernetes_version, output_dir, debug): +def lint(yamllint_config, values, kubernetes_versions, output_dir, debug): """Calls `helm lint`, `helm template`, `yamllint` and `kubeval`.""" print("### Clearing output directory") @@ -52,7 +51,7 @@ def lint(yamllint_config, values, kubernetes_version, output_dir, debug): ]) print("### Linting started") - print("### 1/4 - helm lint") + print("### 1/4 - helm lint: lint helm templates") helm_lint_cmd = [ 'helm', 'lint', '../../jupyterhub', '--values', values, @@ -61,7 +60,7 @@ def lint(yamllint_config, values, kubernetes_version, output_dir, debug): helm_lint_cmd.append('--debug') check_call(helm_lint_cmd) - print("### 2/4 - helm template") + print("### 2/4 - helm template: generate kubernetes resources") helm_template_cmd = [ 'helm', 'template', '../../jupyterhub', '--values', values, @@ -71,31 +70,33 @@ def lint(yamllint_config, values, kubernetes_version, output_dir, debug): helm_template_cmd.append('--debug') check_call(helm_template_cmd) - print("### 3/4 - yamllint") + print("### 3/4 - yamllint: yaml lint generated kubernetes resources") check_call([ 'yamllint', '-c', yamllint_config, output_dir ]) - print("### 4/4 - kubeval") - for filename in glob.iglob(output_dir + '/**/*.yaml', recursive=True): - check_call([ - 'kubeval', filename, - '--kubernetes-version', kubernetes_version, - '--strict', - ]) + print("### 4/4 - kubeval: validate generated kubernetes resources") + for kubernetes_version in kubernetes_versions.split(","): + print("#### kubernetes_version ", kubernetes_version) + for filename in glob.iglob(output_dir + '/**/*.yaml', recursive=True): + check_call([ + 'kubeval', filename, + '--kubernetes-version', kubernetes_version, + '--strict', + ]) print() - print("### Linting and validation of templates finished: All good!") + print("### Linting and validation of helm templates and generated kubernetes resources OK!") if __name__ == '__main__': argparser = argparse.ArgumentParser() argparser.add_argument('--debug', action='store_true', help='Run helm lint and helm template with the --debug flag') argparser.add_argument('--values', default='lint-and-validate-values.yaml', help='Specify Helm values in a YAML file (can specify multiple)') - argparser.add_argument('--kubernetes-version', default='1.11.0', help='Version of Kubernetes to validate against') + argparser.add_argument('--kubernetes-versions', default='1.15.0', help='List of Kubernetes 
versions to validate against separated by ","') argparser.add_argument('--output-dir', default='rendered-templates', help='Output directory for the rendered templates. Warning: content in this will be wiped.') argparser.add_argument('--yamllint-config', default='yamllint-config.yaml', help='Specify a yamllint config') args = argparser.parse_args() - lint(args.yamllint_config, args.values, args.kubernetes_version, args.output_dir, args.debug) + lint(args.yamllint_config, args.values, args.kubernetes_versions, args.output_dir, args.debug) diff --git a/vagrant-vm-setup.sh b/vagrant-vm-setup.sh new file mode 100644 index 0000000000..e173a10b78 --- /dev/null +++ b/vagrant-vm-setup.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -eu + +## Install pip +## +## NOTE: pip installs executable packages in ~/.local/bin +## +apt-get -q update +apt-get -q install -y python3-pip +echo 'PATH=$PATH:~/.local/bin' >> /home/vagrant/.bashrc + +## Install Docker CE +## +## ref: https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-using-the-convenience-script +## +curl -sSL https://get.docker.com | sh +usermod -aG docker vagrant + +## When we run ./ci/vagrant-run-ci.sh we get some environment variables set, +## but these will be lost if the script quits due to an error. +echo 'PATH=$PATH:~/zero-to-jupyterhub-k8s/bin' >> /home/vagrant/.bashrc From 162632a088af21494130a4a092fd567d756b344b Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 30 Sep 2019 17:23:36 +0200 Subject: [PATCH 16/77] Release instructions revised --- .travis.yml | 4 +- RELEASE.md | 279 ++++++++++++------------------------------ ci/publish | 6 +- jupyterhub/Chart.yaml | 2 +- tools/contributors.py | 33 +++-- 5 files changed, 105 insertions(+), 219 deletions(-) diff --git a/.travis.yml b/.travis.yml index 150c552f9c..b1367cf2e0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,4 @@ -dist: xenial +dist: bionic language: python python: - 3.6 @@ -81,3 +81,5 @@ jobs: ## ref: https://docs.travis-ci.com/user/environment-variables/#encrypting-environment-variables - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= - secure: BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= + on: + tags: true diff --git a/RELEASE.md b/RELEASE.md index 158f24f4b0..ab5b64d63b 100644 --- a/RELEASE.md +++ 
b/RELEASE.md @@ -1,203 +1,86 @@ -# Releasing a new version of the helm chart +# Release process -The following steps can be followed to release a new version of the Helm Chart. +Start by making a release issue using the template below, it can be followed to +release a new version of the Helm chart and help everybody coordinate. Do some +copy pasting! -## Create an issue for the new release +## Issue title: Release x.y.z -Use this issue to coordinate efforts and keep track of progress. You can -copy / paste the raw Markdown from the following list, which will be covered -in more detail below. - -``` -Title: Release {{release-name}} -Content: - -This issue will be used to coordinate the next release of the helm -chart, {{release-name}}. Instructions for creating the release can be found in -[CONTRIBUTING.md](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CONTRIBUTING.md#releasing-a-new-version-of-the-helm-chart). +This issue will be used to coordinate the next release of the Helm +chart according to the instructions in [RELEASE.md](RELEASE.md). Below is the checklist for this release. -- [ ] Code, tests, and documentation to support a release are stable. -- [ ] Make a CHANGELOG -- [ ] Generate and add the list of contributors -- [ ] Build and push a new Docker image to DockerHub -- [ ] Commit version bump in `Chart.yaml` and `Values.yaml` -- [ ] Update references in documentation to the new version (note: documentation - should be stable and there should be no anticipated major changes to content). -- [ ] Confirm that a new deployment using the updated instructions works -- [ ] Create and push a new tag for this release -- [ ] Create and publish a new GitHub release -- [ ] Write / publish a blog post based largely off of the CHANGELOG -- [ ] Set ReadTheDocs to begin using `latest` by default -- [ ] Celebrate! -``` - -As there are often many documentation improvements following the release of -a new version, we set ReadTheDocs to serve `latest/` until the first docs are -written that are next-version-specific. As soon as documentation must be -written for the **next** version of the Helm Chart, you can use the following -checklist: - -``` -- [ ] Create a new tag for a documentation release (same release name with `-doc` at the end) -- [ ] Publish this tag -- [ ] Set ReadTheDocs to point to the **new tag** by default instead of `latest` -- [ ] Continue making next-version-specific changes to the documentation. -``` - -**Note**: Switching the documentation to `latest` after a new release is a stop-gap -measure to accomodate the fact that the documentation is still changing relatively -rapidly. Once the documentation as a whole stabilizes (after a few more release -cycles), we plan to begin switching straight from the last version to the new version -of documentation without going through latest. - -## Make a CHANGELOG - -This needs to be manually created, following the format of -current [CHANGELOG](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CHANGELOG.md). The general structure should be: - -* A short description of the general theme / points of interest for - this release. -* Breaking changes + a link to the [upgrade instructions](https://zero-to-jupyterhub.readthedocs.io/en/v0.5-doc/upgrading.html) in the docs -* A list of features with brief descriptions under each. -* The contributor list mentioned in the section below. - -## Add list of contributors - -We try to recognize *all* sorts of contributors, rather -than just code committers. 
- -Use the script in `tools/contributors.py` to list all -contributions (anyone who made a commit or a comment) -since the latest release. For each -release, you'll need to find the versions of all repos -involved: - -* [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) -* [KubeSpawner](https://github.com/jupyterhub/kubespawner) -* [JupyterHub](https://github.com/jupyterhub/jupyterhub) -* [OAuthenticator](https://github.com/jupyterhub/oauthenticator) - -Edit `contributors.py` to have the appropriate dates -for each of these versions. Then, run the script and paste -the output into the changelog. For an -example, see [the v0.5 list of contributors](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/v0.5/CHANGELOG.md#contributors). - - -## Push built images to DockerHub + bump version - -The JupyterHub helm chart uses a Docker image that's registered -on DockerHub. When releasing a new version of the helm chart, -you also need to push a new version of this image. To do so, -you must have: - -1. Docker running locally -2. An account on DockerHub that you are logged into from - your local docker installation. -3. Push rights for images under `jupyterhub/` on - the DockerHub registry. -4. Push rights to the `jupyterhub/helm-chart` repository on GitHub. -5. A local SSH key that will let you push to the `helm-chart` repository - on GitHub. See [these instructions](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for information on how to create this. - -**Note**: If you don't have a DockerHub account, or don't have push rights to -the DockerHub registry, open an issue and ping one of the core devs. - -If you have all of this, you can then: - -1. Check out latest master of [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) -2. Run `chartpress --tag --push --publish-chart`. - * For example, to relase `v0.5`, you would run - `chartpress --tag v0.5 --push --publish-chart`. - Note the `v` before version. -3. This will also modify the files `Chart.yaml` and `values.yaml`. - Commit these changes. -4. Look through the [z2jh documentation](https://zero-to-jupyterhub.readthedocs.io) and find any references to - the Helm Chart version (e.g., look for the flag `--version`, as well - as for all `helm upgrade` and `helm install` commands). - Update these references to point to the new version you are releasing. -5. Make a PR to the z2jh repository and notify the team to take a look. - -After this PR gets merged: - -1. Go to https://zero-to-jupyterhub.readthedocs.io/en/latest and - deploy a JupyterHub using the instructions (make sure that - you're reading from `/en/latest`). Make sure your latest - changes are present, and that the JupyterHub successfully deploys - and functions properly. - -Next, move on to making a GitHub release, described below. - -## Tagging and making a GitHub release - -Now that our Docker image is pushed and we have updated the documentation -for z2jh, it's time to make a new GitHub release. To do this, you must have: - -1. Push rights to the `jupyterhub/zero-to-jupyterhub-k8s` repo - -You will need to make a git tag, and then create a GitHub release. - -1. Make sure you're on branch `master` with your latest changes from - the section above pulled. -2. Make a git tag with: - ``` - git tag -a - ``` - - Where `` should be the new version that you're releasing. - Note the `v` before the version number. - - Git will ask you to include a message with the tag. 
-
-   Paste the entire contents of the CHANGELOG for this particular release.
-   An easy way to do this is to paste the contents in a text file, and
-   then refer to that text file with the call to commit:
-   `git tag -a -F `
-3. Push the tags to the `jupyterhub/zero-to-jupyterhub-k8s` repo with
-   `git push --tags`.
-   Note that `` is whatever your local git uses to refer
-   to the `jupyerhub/` organization's repository (e.g., `official`
-   or `upstream`)
-3. Make a **GitHub Release**:
-   * go to https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases and click 'Draft new release'.
-   * The title should be the new version, followed by the name of the cricketer for the release. Like so:`v0.5: "Hamid Hassan"`.
-   * The description should include the entire changelog entry for this release.
-   * Make sure the title/description/tag name look correct, and then click
-     on `Publish Release`.
-
-You've just made a GitHub release!
-
-
-## ReadTheDocs update
-
-Wait a few hours to let the release 'cool' and make sure that links,
-webpages, etc have updated. Then, update our documentation settings on
-readthedocs to show `latest` by default. This marks the official
-'release' of the version!
-
-## Last step - release a blog post and tell the world!
-
-The final step is to release a blog post. This doesn't have to be
-done by the person who performed all of the above actions.
-
-To release a blog post for the new version, start a draft on the Jupyter Medium
-blog. Copy/paste the section of the CHANGELOG corresponding to the new
-release, then make minor modifications to make it more blog-friendly.
-
-Don't forget to tell the JupyterHub community about the new release, and
-to encourage people to talk about it on social media!
-
-That's it! Congratulations on making a new release of JupyterHub!
-
-## Extra step - release a documentation release
-
-It is common that documentation changes are made shortly after a new release.
-To handle this, we often create a documentation release a few days after a
-major release.
-
-To do this, confirm that all changes to the documentation
-are merged into master, then create a new tag with the same release name and
-`-doc` appended to the end. Create a GitHub release with the new tag and a
-description that points to the original release description. Finally, set
-our ReadTheDocs settings to point users to the new `-doc` tag by default instead
-of `latest`.
-
+## Pre-release iteration 1
+
+- Make a changelog
+  - [ ] Summarize points of interest.
+  - [ ] List breaking changes and mention the [upgrade instructions](https://z2jh.jupyter.org/en/latest/upgrading.html).
+  - [ ] Update the upgrade instructions.
+  - [ ] List features with brief descriptions.
+
+- Pre-release
+  - [ ] Create and push a git tag
+
+    ```bash
+    git checkout master
+    git pull
+    git tag -a x.y.z-beta.1
+    git push --tags
+    ```
+
+- Update documentation
+  - [ ] Update old version references to the new version
+
+- Communicate
+  - [ ] Write a discourse post
+
+- Verify
+  - [ ] Follow one set of instructions to deploy on z2jh.jupyter.org
+
+## Final release
+
+- Update changelog
+  - [ ] Update the changelog
+  - [ ] Generate and add a list of contributors
+
+    ```bash
+    # install dependencies for the script
+    pip install pygithub requests-cache tqdm
+
+    # NOTE: You may need to wait a long time for this to finish. You may even
+    #       get errors because you used too much of your API quota. If that
+    #       happens, you can re-run it again later and rely on the caching to
+    #       ensure you make progress. You have 5000 requests per hour.
+
+    # get a GITHUB_API_TOKEN for use with the script
+    GITHUB_API_TOKEN="your-token" tools/contributors.py
+    ```
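+
+    A rough sketch of the caching mechanism (this mirrors what
+    `tools/contributors.py` already does; it is shown only for orientation):
+
+    ```python
+    import requests_cache
+
+    # Completed GitHub API responses are cached in a local github.sqlite
+    # file, so a re-run replays them from disk instead of spending quota.
+    requests_cache.install_cache("github")
+    ```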
+
+- Release
+  - [ ] Create and push a git tag
+
+    ```bash
+    git checkout master
+    git pull
+    git tag -a x.y.z
+    git push --tags
+    ```
+
+  - [ ] Create a GitHub release
+
+    Visit the [release
+    page](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases) and
+    create a new release referencing the recent tag. Add a brief text like the
+    one below.
+
+    ```Markdown
+    # TODO: Figure out how to...
+    - Warn about eventual breaking changes.
+    - Reference upgrade instructions and the changelog.
+    - NOTE: Also make the upgrade instructions contain a reference on what to do if they fail.
+    ```
+
+- Communicate
+  - [ ] Write a discourse post
+  - [ ] Write a blog post
+  - [ ] Tweet about it
diff --git a/ci/publish b/ci/publish
index 01e5bbb0e9..36afab3045 100755
--- a/ci/publish
+++ b/ci/publish
@@ -16,7 +16,11 @@ set -x
 # git ahead of time to use the identity we decrypted earlier.
 export GIT_SSH_COMMAND="ssh -i ${PWD}/ci/id_rsa"
 
-chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart
+if [ "${TRAVIS_TAG:-}" == "" ]; then
+    chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart
+else
+    chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart --tag "${TRAVIS_TAG}"
+fi
 
 # Let us log the changes chartpress did, it should include replacements for
 # fields in values.yaml, such as what tag for various images we are using.
diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml
index e9dafbe531..21ccf4d67d 100644
--- a/jupyterhub/Chart.yaml
+++ b/jupyterhub/Chart.yaml
@@ -1,5 +1,5 @@
 name: jupyterhub
-version: 0.9-0924e65
+version: 0.9-dev
 appVersion: 1.0.1dev
 description: Multi-user Jupyter installation
 home: https://z2jh.jupyter.org
diff --git a/tools/contributors.py b/tools/contributors.py
index cdce42ef87..293ef70edf 100755
--- a/tools/contributors.py
+++ b/tools/contributors.py
@@ -1,30 +1,27 @@
 #!/usr/bin/env python3
 """
-Script to list *all* contributors to a given set of repos,
-between a set of dates. This includes:
+Script to list *all* contributors to a given set of repos, between a set of
+dates. This includes:
 
 - Everyone who has made a commit merged between those dates
 - Everyone who has opened or commented on an issue between those dates
 - Everyone who has opened or commented on a PR between those dates
 
-If you think this misses people who make contributions of a specific
-form, feel free to add them here!
+This script outputs a markdown formatted list of contributors in casefolded name
+(username if name not specified) order.
 
-This script outputs a markdown formatted list of contributors
-in casefolded name (username if name not specified) order.
-
-Since we will be making a ton of requests to the GitHub API, you
-need a GitHub API Token to use this script. The easiest way is to
-just get a Personal Access Token (https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/).
-Treat this token similar to how you would treat a password!
-You can pass this token in with `GITHUB_API_TOKEN` environment
-variable. For example,
+Since we will be making a ton of requests to the GitHub API, you need a GitHub
+API Token to use this script. The easiest way is to just get a Personal Access
+Token
+(https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/).
+Treat this token similar to how you would treat a password!
You can pass this +token in with `GITHUB_API_TOKEN` environment variable. For example, $ GITHUB_API_TOKEN="your-token" ./tools/contributors.py -Note that if you put a space before your command, it does not -get stored in your bash history (by default)!. Look up `HISTCONTROL` -to learn more about this feature of shells. +Note that if you put a space before your command, it does not get stored in your +bash history (by default)!. Look up `HISTCONTROL` to learn more about this +feature of shells. IMPORTANT: You may need to run this script twice or so, utilizing the previous runs cached @@ -32,10 +29,10 @@ """ import os -import dateutil +from dateutil.parser import parse + import requests_cache from github import Github -from dateutil.parser import parse from tqdm import tqdm requests_cache.install_cache('github') From c89542a2c0337a4fc3ba835dd936563573a8c8eb Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 30 Sep 2019 21:24:00 +0200 Subject: [PATCH 17/77] Apply suggestions from code review Co-Authored-By: Carol Willing --- .binder/README.md | 2 +- .circleci/README.md | 4 ++-- RELEASE.md | 2 +- ci/kind-load-docker-images.py | 2 ++ tests/README.md | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.binder/README.md b/.binder/README.md index 5914e4391b..ed2069b386 100644 --- a/.binder/README.md +++ b/.binder/README.md @@ -1,3 +1,3 @@ # What is this folder about? -It's contains the dependency information required by [a notebook](doc/ntbk/draw_function.ipynb) that [we reference](doc/source/cost.rst) in our documentation to run on mybinder.org. All it takes is the click of [a link](http://mybinder.org/v2/gh/jupyterhub/zero-to-jupyterhub-k8s/master?filepath=doc/ntbk/draw_function.ipynb) thanks to this. +This folder contains the dependency information required by [a notebook](doc/ntbk/draw_function.ipynb) [referenced](doc/source/cost.rst) in our documentation to run on mybinder.org. This dependency information allows a click of [a link](http://mybinder.org/v2/gh/jupyterhub/zero-to-jupyterhub-k8s/master?filepath=doc/ntbk/draw_function.ipynb) to create the rendered notebook. diff --git a/.circleci/README.md b/.circleci/README.md index 882bb52d92..6e3b70f622 100644 --- a/.circleci/README.md +++ b/.circleci/README.md @@ -4,5 +4,5 @@ We use CircleCI to build documentation previews for PRs, as configured through [.circleci/config.yml], this allows us to easily preview documentation changes in a PR in its final form before the PR is merged. -When a PR is merged [readthedocs.yml](readthedocs.yml) will help ReadTheDocs -build and publish it on https://z2jh.jupyter.org. +When a PR is merged, the configuration in [readthedocs.yml](readthedocs.yml) will help ReadTheDocs +build the docs and publish to https://z2jh.jupyter.org. diff --git a/RELEASE.md b/RELEASE.md index ab5b64d63b..2d145007a0 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,6 +1,6 @@ # Release process -Start by making a release issue using the template below, it can be followed to +Start by making a release issue using the template below. The issue checklist can be followed to release a new version of the Helm chart and help everybody coordinate. Do some copy pasting! diff --git a/ci/kind-load-docker-images.py b/ci/kind-load-docker-images.py index 58996177bd..fd09dad178 100755 --- a/ci/kind-load-docker-images.py +++ b/ci/kind-load-docker-images.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 """ +Functions using kind to load docker images. 
+ Run `kind load docker-image ` on all the docker images within values.yaml that is available locally on the host as first verified with `docker images --quiet `. If we could capture this directly from chartpress diff --git a/tests/README.md b/tests/README.md index ffc9e22431..7f16a141a8 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,3 +1,3 @@ # What is this folder about? -We have setup tests for [pytest](https://docs.pytest.org/en/latest/) that will run in our CI/CD pipeline on Travis. These test assumes it is able to speak directly to a running hub within a Kubernetes cluster etc. In practice, they assume you have been using `dev` script to set it all up. +This folder contains tests that [pytest](https://docs.pytest.org/en/latest/) will run in our CI/CD pipeline on Travis. These tests must be able to speak directly to a running hub within a Kubernetes cluster etc. In practice, the tests will be set up using a `dev` script. From 8fc4a5158e093adaedf3eb2df9e6ebdf5a9af22b Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 1 Oct 2019 07:26:02 +0200 Subject: [PATCH 18/77] Correct spelling mistakes --- ci/common | 2 +- ci/publish | 4 ++-- ci/start-k8s | 2 +- ci/travis-docker-fix | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ci/common b/ci/common index 7763c6d6b0..cf006f75ba 100755 --- a/ci/common +++ b/ci/common @@ -33,7 +33,7 @@ if [ -z ${KUBEVAL_VERSION:-} ]; then fi ## Valid versions to list under LINT_KUBE_VERSIONS are those in the -## kubernetes-json-schema repoistory, used by kubeval. +## kubernetes-json-schema repository, used by kubeval. ## ## ref: https://github.com/instrumenta/kubernetes-json-schema ## diff --git a/ci/publish b/ci/publish index 36afab3045..1e2c9165d1 100755 --- a/ci/publish +++ b/ci/publish @@ -2,8 +2,8 @@ set -eu # Decrypt a private SSH key having its public key registered on GitHub. It will -# be used to establish an identity with rights to push to the repo hosting our -# Helm charts: https://github.com/jupyterhub/helm-chart +# be used to establish an identity with rights to push to the git repository +# hosting our Helm charts: https://github.com/jupyterhub/helm-chart openssl aes-256-cbc -K $encrypted_c6b45058ffe8_key -iv $encrypted_c6b45058ffe8_iv -in ci/publish-id_rsa.enc -out ci/id_rsa -d chmod 0400 ci/id_rsa diff --git a/ci/start-k8s b/ci/start-k8s index ce87244736..185d53c4b7 100755 --- a/ci/start-k8s +++ b/ci/start-k8s @@ -56,7 +56,7 @@ CALICO_ETCD_IP=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.s CALICO_ETCD_PORT=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.ports[0].port}') sed -i -e "s/:/$CALICO_ETCD_IP:$CALICO_ETCD_PORT/" ci/daemonset-calico-node.yaml kubectl apply -f ci/daemonset-calico-node.yaml -# NOTE: daemonset/calico-node pods' main container fails to startup without +# NOTE: daemonset/calico-node pods' main container fails to start up without # an additional environment variable configured to disable a check # that we fail. # diff --git a/ci/travis-docker-fix b/ci/travis-docker-fix index e7f04525f1..2849d10223 100755 --- a/ci/travis-docker-fix +++ b/ci/travis-docker-fix @@ -1,9 +1,9 @@ #!/bin/bash set -eu -# This is a workaround to an issue caused by the existance of a docker registry +# This is a workaround to an issue caused by the existence of a docker registry # mirror in our CI environment. 
Without this fix that removes the mirror, -# chartpress fails to realize the existance of already built images and rebuilds +# chartpress fails to realize the existence of already built images and rebuilds # them. # # ref: https://github.com/moby/moby/issues/39120 From ca368ca12e581240246b1fc6c3d6bb6bbea18a22 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 1 Oct 2019 08:07:11 +0200 Subject: [PATCH 19/77] Document use of kind-load-docker-images.py --- ci/kind-load-docker-images.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ci/kind-load-docker-images.py b/ci/kind-load-docker-images.py index fd09dad178..e788ab7a01 100755 --- a/ci/kind-load-docker-images.py +++ b/ci/kind-load-docker-images.py @@ -6,6 +6,16 @@ values.yaml that is available locally on the host as first verified with `docker images --quiet `. If we could capture this directly from chartpress build output it would be quicker. + +Example: to get help about the parameters and their default values. + + ci/kind-load-docker-images.py --help + +Example: after you have run `chartpress` to modify the values.yaml files with +the newly built image name and tags, you can run this command to ensure the kind +cluster named "dev" gets the required docker images from your local registry. + + ci/kind-load-docker-images.py --kind-cluster dev """ import sys From fe81dbab3e38e7bdfb6333aa232e43151fa0f6ba Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 1 Oct 2019 08:07:21 +0200 Subject: [PATCH 20/77] Correct inline comment --- vagrant-vm-setup.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vagrant-vm-setup.sh b/vagrant-vm-setup.sh index e173a10b78..ef89f93adb 100644 --- a/vagrant-vm-setup.sh +++ b/vagrant-vm-setup.sh @@ -16,6 +16,5 @@ echo 'PATH=$PATH:~/.local/bin' >> /home/vagrant/.bashrc curl -sSL https://get.docker.com | sh usermod -aG docker vagrant -## When we run ./ci/vagrant-run-ci.sh we get some environment variables set, -## but these will be lost if the script quits due to an error. 
+## Put to be downloaded binaries on PATH echo 'PATH=$PATH:~/zero-to-jupyterhub-k8s/bin' >> /home/vagrant/.bashrc From 503badd47e71fcdab8fb513100c4ff05a3fcb8eb Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 1 Oct 2019 14:05:55 +0200 Subject: [PATCH 21/77] Merge dev-config-netpol into the dev-config --- ci/upgrade | 8 +++++--- dev-config-netpol.yaml | 25 ------------------------- dev-config.yaml | 21 +++++++++++++++++++++ tests/test_spawn.py | 6 +++--- 4 files changed, 29 insertions(+), 31 deletions(-) delete mode 100644 dev-config-netpol.yaml diff --git a/ci/upgrade b/ci/upgrade index 30f2180ce7..a5073e775a 100755 --- a/ci/upgrade +++ b/ci/upgrade @@ -23,9 +23,11 @@ echo "load the images the kind cluster" python3 ci/kind-load-docker-images.py --kind-cluster $KIND_CLUSTER --values ./jupyterhub/values.yaml echo "install our deployment" -helm upgrade --install jh-ci --wait --namespace jh-ci ./jupyterhub \ - --values dev-config.yaml \ - --values dev-config-netpol.yaml +helm upgrade jh-ci ./jupyterhub \ + --install \ + --namespace jh-ci \ + --values dev-config.yaml \ + --wait echo "waiting for hub and proxy to become responsive" kubectl rollout status deployment/proxy --timeout 1m diff --git a/dev-config-netpol.yaml b/dev-config-netpol.yaml deleted file mode 100644 index 5144a4f6fa..0000000000 --- a/dev-config-netpol.yaml +++ /dev/null @@ -1,25 +0,0 @@ -hub: - networkPolicy: - enabled: true - -proxy: - networkPolicy: - enabled: true - -singleuser: - networkPolicy: - enabled: true - # Block all egress apart from DNS and jupyter.org - # CIDR must match the allowed URL in test_singleuser_netpol - egress: - - ports: - - port: 53 - protocol: UDP - - to: - - ipBlock: - cidr: 104.28.9.110/32 - - ipBlock: - cidr: 104.28.8.110/32 - -debug: - enabled: true diff --git a/dev-config.yaml b/dev-config.yaml index e864bdd9dd..8aae709ed7 100644 --- a/dev-config.yaml +++ b/dev-config.yaml @@ -9,6 +9,8 @@ proxy: requests: memory: 0 cpu: 0 + networkPolicy: + enabled: true hub: cookieSecret: 1470700e01f77171c2c67b12130c25081dfbdf2697af8c2f2bd05621b31100bf @@ -22,6 +24,8 @@ hub: test: admin: true apiToken: 0cc05feaefeeb29179e924ffc6d3886ffacf0d1a28ab225f5c210436ffc5cfd5 + networkPolicy: + enabled: true singleuser: @@ -29,6 +33,23 @@ singleuser: type: none memory: guarantee: null + networkPolicy: + enabled: true + # Block all egress apart from DNS and jupyter.org + # CIDR must match the allowed URL in test_singleuser_netpol + egress: + - ports: + ## port 53 is the default port for DNS queries + - port: 53 + protocol: UDP + - to: + ## nslookup jupyter.org + ## - 104.28.9.110 + ## - 104.28.8.110 + - ipBlock: + cidr: 104.28.9.110/32 + - ipBlock: + cidr: 104.28.8.110/32 prePuller: hook: diff --git a/tests/test_spawn.py b/tests/test_spawn.py index bf49542c98..eb7023bb15 100644 --- a/tests/test_spawn.py +++ b/tests/test_spawn.py @@ -129,18 +129,18 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): print(server_model) pod_name = server_model['state']['pod_name'] - # Must match CIDR in dev-config-netpol.yaml + # Must match CIDR in singleuser.networkPolicy.egress. 
allowed_url = 'http://jupyter.org' blocked_url = 'http://mybinder.org' c = subprocess.run([ 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', - 'wget', '-q', '-t1', '-T5', allowed_url]) + 'wget', '--quiet', '--tries', '1', '--timeout', '5', allowed_url]) assert c.returncode == 0, "Unable to get allowed domain" c = subprocess.run([ 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', - 'wget', '-q', '-t1', '-T5', blocked_url]) + 'wget', '--quiet', '--tries', '1', '--timeout', '5', blocked_url]) assert c.returncode > 0, "Blocked domain was allowed" finally: From 361ec24d25ff5b09fd503a50b0972fb00e2cee5b Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 1 Oct 2019 14:09:20 +0200 Subject: [PATCH 22/77] Black autoformatting on tests/ --- tests/conftest.py | 40 ++++++++-------- tests/test_spawn.py | 108 ++++++++++++++++++++++++++++---------------- 2 files changed, 89 insertions(+), 59 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index e7e9abb0b7..16b01888f6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,20 +9,18 @@ import yaml -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def request_data(): basedir = os.path.dirname(os.path.dirname(__file__)) - with open(os.path.join(basedir, 'dev-config.yaml')) as f: + with open(os.path.join(basedir, "dev-config.yaml")) as f: y = yaml.safe_load(f) - token = y['hub']['services']['test']['apiToken'] + token = y["hub"]["services"]["test"]["apiToken"] return { - 'token': token, - 'hub_url': os.getenv('HUB_API_URL', 'http://localhost:31212/hub/api'), - 'headers': { - 'Authorization': f'token {token}' - }, - 'test_timeout': 300, - 'request_timeout': 60, + "token": token, + "hub_url": os.getenv("HUB_API_URL", "http://localhost:31212/hub/api"), + "headers": {"Authorization": f"token {token}"}, + "test_timeout": 300, + "request_timeout": 60, } @@ -31,39 +29,39 @@ def __init__(self, request_data): self.request_data = request_data def _setup_kwargs(self, kwargs): - kwargs['headers'] = kwargs.get('headers', self.request_data['headers']) - kwargs['timeout'] = kwargs.get('timeout', self.request_data['request_timeout']) + kwargs["headers"] = kwargs.get("headers", self.request_data["headers"]) + kwargs["timeout"] = kwargs.get("timeout", self.request_data["request_timeout"]) def delete(self, api, **kwargs): self._setup_kwargs(kwargs) - return requests.delete(self.request_data['hub_url'] + api, **kwargs) + return requests.delete(self.request_data["hub_url"] + api, **kwargs) def get(self, api, **kwargs): self._setup_kwargs(kwargs) - return requests.get(self.request_data['hub_url'] + api, **kwargs) + return requests.get(self.request_data["hub_url"] + api, **kwargs) def post(self, api, **kwargs): self._setup_kwargs(kwargs) - return requests.post(self.request_data['hub_url'] + api, **kwargs) + return requests.post(self.request_data["hub_url"] + api, **kwargs) def put(self, api, **kwargs): self._setup_kwargs(kwargs) - return requests.put(self.request_data['hub_url'] + api, **kwargs) + return requests.put(self.request_data["hub_url"] + api, **kwargs) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def api_request(request_data): return JupyterRequest(request_data) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def jupyter_user(api_request): """ A temporary unique JupyterHub user """ - username = 'testuser-' + str(uuid.uuid4()) - r = api_request.post('/users/' + username) + username = "testuser-" + str(uuid.uuid4()) + r = api_request.post("/users/" + username) assert 
r.status_code == 201 yield username - r = api_request.delete('/users/' + username) + r = api_request.delete("/users/" + username) assert r.status_code == 204 diff --git a/tests/test_spawn.py b/tests/test_spawn.py index eb7023bb15..062c263bb1 100644 --- a/tests/test_spawn.py +++ b/tests/test_spawn.py @@ -17,16 +17,16 @@ # load app version of chart here = os.path.dirname(os.path.abspath(__file__)) -chart_yaml = os.path.join(here, os.pardir, 'jupyterhub', 'Chart.yaml') +chart_yaml = os.path.join(here, os.pardir, "jupyterhub", "Chart.yaml") with open(chart_yaml) as f: chart = yaml.safe_load(f) - jupyterhub_version = chart['appVersion'] + jupyterhub_version = chart["appVersion"] def test_api(api_request): print("asking for the hub's version") - r = api_request.get('') + r = api_request.get("") assert r.status_code == 200 assert r.json().get("version", "version-missing") == jupyterhub_version @@ -37,10 +37,10 @@ def test_api(api_request): def test_api_info(api_request): print("asking for the hub information") - r = api_request.get('/info') + r = api_request.get("/info") assert r.status_code == 200 result = r.json() - assert result['spawner']['class'] == 'kubespawner.spawner.KubeSpawner' + assert result["spawner"]["class"] == "kubespawner.spawner.KubeSpawner" """kubectl logs deploy/hub - on a successful run [I 2019-09-25 12:03:12.086 JupyterHub log:174] 200 GET /hub/api/info (test@127.0.0.1) 10.21ms @@ -57,9 +57,9 @@ def test_hub_api_create_user_and_get_information_about_user(api_request, jupyter # Also note that the fixture will automatically clean up the # user from the hub's database when the function exit. print("create a user, and get information about the user") - r = api_request.get('/users/' + jupyter_user) + r = api_request.get("/users/" + jupyter_user) assert r.status_code == 200 - assert r.json()['name'] == jupyter_user + assert r.json()["name"] == jupyter_user """kubectl logs deploy/hub - on a successful run [I 2019-09-25 12:03:12.126 JupyterHub log:174] 201 POST /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 20.74ms @@ -75,9 +75,9 @@ def test_hub_api_create_user_and_get_information_about_user(api_request, jupyter def test_hub_api_list_users(api_request, jupyter_user): print("create a test user, get information about all users, and find the test user") - r = api_request.get('/users') + r = api_request.get("/users") assert r.status_code == 200 - assert any(u['name'] == jupyter_user for u in r.json()) + assert any(u["name"] == jupyter_user for u in r.json()) """kubectl logs deploy/hub - on a successful run [I 2019-09-25 12:03:12.303 JupyterHub log:174] 201 POST /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 15.53ms @@ -88,17 +88,17 @@ def test_hub_api_list_users(api_request, jupyter_user): def test_hub_can_talk_to_proxy(api_request, request_data): - endtime = time.time() + request_data['test_timeout'] + endtime = time.time() + request_data["test_timeout"] while time.time() < endtime: try: - r = api_request.get('/proxy') + r = api_request.get("/proxy") if r.status_code == 200: break print(r.json()) except requests.RequestException as e: print(e) time.sleep(1) - assert r.status_code == 200, 'Failed to get /proxy' + assert r.status_code == 200, "Failed to get /proxy" """kubectl logs deploy/hub - on a successful run [I 2019-09-25 12:03:12.395 JupyterHub log:174] 200 GET /hub/api/proxy (test@127.0.0.1) 13.48ms @@ -107,44 +107,76 @@ def test_hub_can_talk_to_proxy(api_request, request_data): def 
test_hub_api_request_user_spawn(api_request, jupyter_user, request_data): print("asking kubespawner to spawn a server for a test user") - r = api_request.post('/users/' + jupyter_user + '/server') + r = api_request.post("/users/" + jupyter_user + "/server") assert r.status_code in (201, 202) try: - server_model = _wait_for_user_to_spawn(api_request, jupyter_user, request_data['test_timeout']) + server_model = _wait_for_user_to_spawn( + api_request, jupyter_user, request_data["test_timeout"] + ) assert server_model - r = requests.get(request_data['hub_url'].partition('/hub/api')[0] + server_model['url'] + "api") + r = requests.get( + request_data["hub_url"].partition("/hub/api")[0] + + server_model["url"] + + "api" + ) assert r.status_code == 200 - assert 'version' in r.json() + assert "version" in r.json() finally: - _delete_server(api_request, jupyter_user, request_data['test_timeout']) + _delete_server(api_request, jupyter_user, request_data["test_timeout"]) def test_singleuser_netpol(api_request, jupyter_user, request_data): - print("asking kubespawner to spawn a server for a test user to test network policies") - r = api_request.post('/users/' + jupyter_user + '/server') + print( + "asking kubespawner to spawn a server for a test user to test network policies" + ) + r = api_request.post("/users/" + jupyter_user + "/server") assert r.status_code in (201, 202) try: - server_model = _wait_for_user_to_spawn(api_request, jupyter_user, request_data['test_timeout']) + server_model = _wait_for_user_to_spawn( + api_request, jupyter_user, request_data["test_timeout"] + ) assert server_model print(server_model) - pod_name = server_model['state']['pod_name'] + pod_name = server_model["state"]["pod_name"] # Must match CIDR in singleuser.networkPolicy.egress. - allowed_url = 'http://jupyter.org' - blocked_url = 'http://mybinder.org' - - c = subprocess.run([ - 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', - 'wget', '--quiet', '--tries', '1', '--timeout', '5', allowed_url]) + allowed_url = "http://jupyter.org" + blocked_url = "http://mybinder.org" + + c = subprocess.run( + [ + "kubectl", + "--namespace=jh-ci", + "exec", + pod_name, + "--", + "wget", + "--quiet", + "--tries=1", + "--timeout=5", + allowed_url, + ] + ) assert c.returncode == 0, "Unable to get allowed domain" - c = subprocess.run([ - 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', - 'wget', '--quiet', '--tries', '1', '--timeout', '5', blocked_url]) + c = subprocess.run( + [ + "kubectl", + "--namespace=jh-ci", + "exec", + pod_name, + "--", + "wget", + "--quiet", + "--tries=1", + "--timeout=5", + blocked_url, + ] + ) assert c.returncode > 0, "Blocked domain was allowed" finally: - _delete_server(api_request, jupyter_user, request_data['test_timeout']) + _delete_server(api_request, jupyter_user, request_data["test_timeout"]) def _wait_for_user_to_spawn(api_request, jupyter_user, timeout): @@ -152,18 +184,18 @@ def _wait_for_user_to_spawn(api_request, jupyter_user, timeout): while time.time() < endtime: # NOTE: If this fails with a 503 response from the proxy, the hub pod has # probably crashed by the tests interaction with it. - r = api_request.get('/users/' + jupyter_user) + r = api_request.get("/users/" + jupyter_user) r.raise_for_status() user_model = r.json() # will be pending while starting, # server will be set when ready - if '' not in user_model['servers']: + if "" not in user_model["servers"]: # spawn failed! 
            raise RuntimeError("Server never started!")
 
-        server_model = user_model['servers']['']
-        if server_model['ready']:
+        server_model = user_model["servers"][""]
+        if server_model["ready"]:
             return server_model
         time.sleep(1)
 
@@ -174,15 +206,15 @@ def _wait_for_user_to_spawn(api_request, jupyter_user, timeout):
 
 
 def _delete_server(api_request, jupyter_user, timeout):
     # NOTE: If this request fails with a 503 response from the proxy, the hub
     # pod has probably crashed by the tests interaction with it.
 
-    r = api_request.delete('/users/' + jupyter_user + '/server')
+    r = api_request.delete("/users/" + jupyter_user + "/server")
     assert r.status_code in (202, 204)
 
     endtime = time.time() + timeout
     while time.time() < endtime:
-        r = api_request.get('/users/' + jupyter_user)
+        r = api_request.get("/users/" + jupyter_user)
         r.raise_for_status()
         user_model = r.json()
-        if '' not in user_model['servers']:
+        if "" not in user_model["servers"]:
             return True
         time.sleep(1)
     return False

From 55b5da1250f0cc26b70e542abc3fa5122eeb08c1 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Tue, 1 Oct 2019 14:11:21 +0200
Subject: [PATCH 23/77] Fail fast on missing HUB_API_URL

The port used was related to minikube, but we now also use kind, and
the port is quite arbitrary anyhow, so let's fail fast instead to make
it easier to conclude what's wrong if the environment variable isn't
set.
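For orientation, a minimal sketch (not from this repository) of the
difference this change leans on: `os.getenv` with a default silently
papers over a missing variable, while `os.environ[...]` raises at once.

```python
import os

os.environ.pop("HUB_API_URL", None)

# Old behavior: silently fall back to a URL that may serve nothing at all.
url = os.getenv("HUB_API_URL", "http://localhost:31212/hub/api")

# New behavior: fail fast with a KeyError that names the real problem.
try:
    url = os.environ["HUB_API_URL"]
except KeyError:
    print("HUB_API_URL is not set")
```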
---
 tests/conftest.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index 16b01888f6..f484f67a2a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -17,7 +17,7 @@ def request_data():
     token = y["hub"]["services"]["test"]["apiToken"]
     return {
         "token": token,
-        "hub_url": os.getenv("HUB_API_URL", "http://localhost:31212/hub/api"),
+        "hub_url": os.environ["HUB_API_URL"],
         "headers": {"Authorization": f"token {token}"},
         "test_timeout": 300,
         "request_timeout": 60,

From fe81dbab3e38e7bdfb6333aa232e43151fa0f6ba Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Tue, 1 Oct 2019 14:18:34 +0200
Subject: [PATCH 24/77] Cleanup comments

---
 tests/test_spawn.py | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/tests/test_spawn.py b/tests/test_spawn.py
index 062c263bb1..91f6fa6381 100755
--- a/tests/test_spawn.py
+++ b/tests/test_spawn.py
@@ -6,12 +6,6 @@
 import requests
 import yaml
 
-## DEV NOTES:
-## A lot of logs are currently in the code for debugging purposes.
-##
-## ref: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/589410196
-##
-
 # Makes heavy use of JupyterHub's API:
 # http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/docs/rest-api.yml
 
@@ -182,8 +176,8 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data):
 def _wait_for_user_to_spawn(api_request, jupyter_user, timeout):
     endtime = time.time() + timeout
     while time.time() < endtime:
-        # NOTE: If this fails with a 503 response from the proxy, the hub pod has
-        # probably crashed by the tests interaction with it.
+        # NOTE: If this request fails with a 503 response from the proxy, the
+        # hub pod has probably crashed by the tests' interaction with it.
         r = api_request.get("/users/" + jupyter_user)
         r.raise_for_status()
         user_model = r.json()
@@ -203,9 +197,8 @@ def _wait_for_user_to_spawn(api_request, jupyter_user, timeout):
 
 
 def _delete_server(api_request, jupyter_user, timeout):
-    # NOTE: If this request fails with a 503 response from the proxy, the hub
-    # pod has probably crashed by the tests interaction with it.
-
+    # NOTE: If this request fails with a 503 response from the proxy, the hub
+    # pod has probably crashed by the previous tests' interaction with it.
     r = api_request.delete("/users/" + jupyter_user + "/server")
     assert r.status_code in (202, 204)

From 0982d2d3dc5233bfbf82c528936c1429bd930e19 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Tue, 1 Oct 2019 14:19:12 +0200
Subject: [PATCH 25/77] Consistently use yaml.safe_load over yaml.full_load
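
`safe_load` is the conservative loader: it only ever builds plain Python
data structures, which is all these YAML files contain. A small
illustration (not part of this patch) of the distinction:

```python
import yaml

# safe_load produces only dicts, lists, strings, numbers, booleans and None:
doc = yaml.safe_load("hub:\n  image:\n    tag: set-by-chartpress\n")
assert doc["hub"]["image"]["tag"] == "set-by-chartpress"

# ... and it rejects YAML that asks the loader to construct Python objects:
try:
    yaml.safe_load("!!python/object/apply:os.system ['echo boom']")
except yaml.YAMLError as error:
    print("rejected:", error)
```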
---
 ci/kind-load-docker-images.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ci/kind-load-docker-images.py b/ci/kind-load-docker-images.py
index e788ab7a01..1d410bbea5 100755
--- a/ci/kind-load-docker-images.py
+++ b/ci/kind-load-docker-images.py
@@ -52,10 +52,10 @@ def extract_images_from_values(chartpress_file, values_file):
     """Returns a list of image:tag strings given a values.yaml file."""
 
     with open(chartpress_file) as f:
-        chartpress = yaml.full_load(f)
+        chartpress = yaml.safe_load(f)
 
     with open(values_file) as f:
-        values = yaml.full_load(f)
+        values = yaml.safe_load(f)
 
     image_paths = []
     for chart in chartpress["charts"]:

From 4be955c8bfe21049c6282b0af19bffd70d339c20 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Wed, 2 Oct 2019 22:45:24 +0200
Subject: [PATCH 26/77] Apply suggestions from code review

Co-Authored-By: Simon Li
---
 images/hub/Dockerfile | 2 +-
 tests/test_spawn.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/images/hub/Dockerfile b/images/hub/Dockerfile
index 58a545222f..36e61a0adc 100644
--- a/images/hub/Dockerfile
+++ b/images/hub/Dockerfile
@@ -1,6 +1,6 @@
 FROM ubuntu:18.04
 
-## NOTE: This is a default and be overrridden by chartpress using the
+## NOTE: This is a default and can be overridden by chartpress using the
 ## chartpress.yaml configuration
 ARG JUPYTERHUB_VERSION=1.0.*
 
diff --git a/tests/test_spawn.py b/tests/test_spawn.py
index 91f6fa6381..f163085541 100755
--- a/tests/test_spawn.py
+++ b/tests/test_spawn.py
@@ -49,7 +49,7 @@ def test_hub_api_create_user_and_get_information_about_user(api_request, jupyter
     # user.
     #
     # Also note that the fixture will automatically clean up the
-    # user from the hub's database when the function exit.
+    # user from the hub's database when the function exits.
     print("create a user, and get information about the user")
     r = api_request.get("/users/" + jupyter_user)
     assert r.status_code == 200

From 633cf6658d6301e9724970bee908c0322ae44b7b Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 3 Oct 2019 16:36:32 +0200
Subject: [PATCH 27/77] Re-fix MacOS DNS with @manics' still relevant fix

---
 vagrant-vm-setup.sh | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/vagrant-vm-setup.sh b/vagrant-vm-setup.sh
index ef89f93adb..8de197d41d 100644
--- a/vagrant-vm-setup.sh
+++ b/vagrant-vm-setup.sh
@@ -1,20 +1,46 @@
 #!/bin/sh
 set -eu
 
-## Install pip
-##
-## NOTE: pip installs executable packages in ~/.local/bin
-##
+# Install pip
+#
+# NOTE: pip installs executable packages in ~/.local/bin
+#
 apt-get -q update
 apt-get -q install -y python3-pip
 echo 'PATH=$PATH:~/.local/bin' >> /home/vagrant/.bashrc
 
-## Install Docker CE
-##
-## ref: https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-using-the-convenience-script
-##
+
+# Install Docker CE
+#
+# ref: https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-using-the-convenience-script
+#
 curl -sSL https://get.docker.com | sh
 usermod -aG docker vagrant
 
-## Put to be downloaded binaries on PATH
+
+# Workaround a DNS problem for MacOS running Kubernetes
+#
+# ref: https://github.com/kubernetes/minikube/issues/2027#issuecomment-338221646
+#
+# 1. Write two nameserver entries to /etc/resolv.conf
+#
+cat << EOF > /etc/resolv.conf
+nameserver 8.8.4.4
+nameserver 8.8.8.8
+EOF
+# 2. Edit the line starting with 127.0.0.1 in /etc/hosts
+#
+#    "127.0.0.1 localhost" becomes "127.0.0.1 localhost ubuntu1804.localdomain"
+#
+# NOTE: The sed command below updates the relevant line in the file in place
+#       -i : --in-place
+#       -r : --regexp-extended
+#       -e : --expression
+#       \1 : anything captured in the first parenthesis
+#       \\ : "\" escaped for bash with "\\"
+#
+sed -i -re "s/^(127.0.0.1\\s.+)/\\1 `hostname`/" /etc/hosts
+
+
+# Put binaries that we download later on the PATH
 echo 'PATH=$PATH:~/zero-to-jupyterhub-k8s/bin' >> /home/vagrant/.bashrc

From 0109246e4da47a167a1a4e25fe83b19440ea8e50 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Sat, 5 Oct 2019 19:11:53 +0200
Subject: [PATCH 28/77] Align with chartpress --reset's default value

If you run `chartpress --reset`, image tags will be reset to a value
that can be configured in chartpress.yaml using `resetTag`. This makes
our values align with the chartpress defaults when we reset.

NOTE: `--reset` is available only in chartpress 0.3.2 and higher
versions.
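For reference, a rough sketch of the effect, assuming the chartpress.yaml
layout where entries under `charts[].images` carry a `valuesPath` (an
illustration only; the real chartpress handles more than image tags):

```python
import yaml

with open("chartpress.yaml") as f:
    config = yaml.safe_load(f)
with open("jupyterhub/values.yaml") as f:
    values = yaml.safe_load(f)

for chart in config["charts"]:
    reset_tag = chart.get("resetTag", "set-by-chartpress")
    for image_config in chart["images"].values():
        # valuesPath is a dot-separated path such as "hub.image" that points
        # at a {name, tag} mapping inside values.yaml
        *parents, leaf = image_config["valuesPath"].split(".")
        node = values
        for key in parents:
            node = node[key]
        node[leaf]["tag"] = reset_tag

with open("jupyterhub/values.yaml", "w") as f:
    yaml.safe_dump(values, f)
```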
--- jupyterhub/values.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml index abcb8e32ec..ac61c810ab 100644 --- a/jupyterhub/values.yaml +++ b/jupyterhub/values.yaml @@ -48,7 +48,7 @@ hub: extraVolumeMounts: [] image: name: jupyterhub/k8s-hub - tag: 'generated-by-chartpress' + tag: 'set-by-chartpress' # pullSecrets: # - secretName resources: @@ -203,7 +203,7 @@ singleuser: networkTools: image: name: jupyterhub/k8s-network-tools - tag: 'generated-by-chartpress' + tag: 'set-by-chartpress' cloudMetadata: enabled: false ip: 169.254.169.254 @@ -245,7 +245,7 @@ singleuser: storageAccessModes: [ReadWriteOnce] image: name: jupyterhub/k8s-singleuser-sample - tag: 'generated-by-chartpress' + tag: 'set-by-chartpress' pullPolicy: IfNotPresent # pullSecrets: # - secretName @@ -316,7 +316,7 @@ prePuller: enabled: true image: name: jupyterhub/k8s-image-awaiter - tag: 'generated-by-chartpress' + tag: 'set-by-chartpress' continuous: enabled: true extraImages: {} From bc9a5351c80417e81ec1687ae21cbc1060b10f6c Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Sun, 6 Oct 2019 22:24:42 +0200 Subject: [PATCH 29/77] Bump chartpress for --reset --- dev-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 740c5fe01c..3048a4b575 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -9,7 +9,7 @@ ## ## ref: https://github.com/jupyterhub/chartpress ## -chartpress==0.3.1 +chartpress==0.3.2 ## pytest run tests that require requests, pytest is run from test ## script From b990d680e556d09c840f40928a35f730fd820569 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 7 Oct 2019 00:49:51 +0200 Subject: [PATCH 30/77] Initial step from bash to python scripts --- dev.py | 178 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100755 dev.py diff --git a/dev.py b/dev.py new file mode 100755 index 0000000000..37513ddcf2 --- /dev/null +++ b/dev.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +Checks that can be made: +1. Are we using the correct cluster? If not KUBECONFIG is set explicitly in an + env var or through an .env file, we could fail. +2. Are the required dependencies available on path or in the ./bin folder? + +Requirements: +- KUBECONFIG is set +- dev-config.yaml is used +- + +.env file: + GITHUB_ACCESS_TOKEN - for release changes and contributors + KUBECONFIG - for kind clusters + HELM_HOME - for plugins + CHARTPRESS_COMMIT_RANGE - ? +""" + +import argparse +import os +import pipes +import subprocess +import sys + +import dotenv + + +def kind_start(force): + # check if there is a cluster existing already + # then delete it + + # start a new cluster with a fixed name, kubernetes version + # configure a default namespace + + # install calico + # install helm + pass + + +def kind_stop(): + # delete the kind cluster + pass + + +def upgrade(): + # consider commit-range + # run chartpress + # (conditionally) load images to a kind cluster + # helm upgrade / install with dev-config + # (?) 
port-forward + pass + +# req: kubectl, kubeconfig, running cluster, +def test(): + # pytest + pass + + +def check_templates(): + # lint-and-validate script + pass + + +def check_python_code(apply): + # black + pass + + +def changelog(): + # req: GITHUB_ACCESS_TOKEN + + # gitlab-activity + pass + + +def _check_output(cmd, **kwargs): + """Run a subcommand and exit if it fails""" + try: + return subprocess.check_output(cmd, **kwargs) + except subprocess.CalledProcessError as e: + print( + "`{}` exited with status {}".format( + " ".join(map(pipes.quote, cmd)), e.returncode + ), + file=sys.stderr, + ) + sys.exit(e.returncode) + + +def _get_argparser(): + _ = argparse.ArgumentParser( + description="Local development help for jupyterhub/zero-to-jupyterhub-k8s" + ) + _cmds = _.add_subparsers(title="Commands", dest="cmd", required=True) + + kind = _cmds.add_parser( + "kind", help="Kubernetes-in-Docker (kind) cluster management." + ) + kind_cmds = kind.add_subparsers(title="Commands", dest="sub_cmd", required=True) + kind_start = kind_cmds.add_parser( + "start", help="Start and initialize a kind Kubernetes cluster." + ) + kind_start.add_argument( + "-f", + "--force", + action="store_true", + help="If the cluster is already started, delete it and start a new.", + ) + kind_stop = kind_cmds.add_parser( + "stop", help="Stop and delete a previously started kind Kubernetes cluster." + ) + + upgrade = _cmds.add_parser( + "upgrade", help="Install or upgrade the Helm chart in the Kubernetes cluster." + ) + + test = _cmds.add_parser( + "test", help="Run tests on the deployed Helm chart in the Kubernetes cluster." + ) + + check = _cmds.add_parser( + "check", help="Run checks on your developed helm templates and python code." + ) + check_cmds = check.add_subparsers(title="Commands", dest="sub_cmd", required=True) + check_templates = check_cmds.add_parser( + "templates", + help="Run checks on the Helm templates and the Kubernetes resources they generate using: helm lint, helm templates, yamllint, and kubeval.", + ) + check_python_code = check_cmds.add_parser( + "python-code", help="Run checks on the python code using: black." 
+ ) + check_python_code.add_argument( + "--apply", + action="store_true", + help="Apply autoformatting to the Python code files.", + ) + + changelog = _cmds.add_parser( + "changelog", + help="Generate a changelog since last release using: choldgraf/github-activity.", + ) + + return _ + + +if __name__ == "__main__": + # parse passed command line arguments + argparser = _get_argparser() + args = argparser.parse_args() + + # DEBUGGING: + print(args) + + # load environment variables from the .env file + dotenv.load_dotenv() + + # run suitable command and pass arguments + if args.cmd == "kind": + if args.sub_cmd == "start": + kind_start(force=args.force) + if args.sub_cmd == "stop": + kind_stop() + + if args.cmd == "upgrade": + upgrade() + + if args.cmd == "test": + test() + + if args.cmd == "check": + if args.sub_cmd == "templates": + check_templates() + if args.sub_cmd == "python-code": + check_python_code(apply=args.apply) + + if args.cmd == "changelog": + changelog() From f3f81c4f328b938b731ff5b78cd5dc792a543c7a Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 7 Oct 2019 15:04:37 +0200 Subject: [PATCH 31/77] Add decorator for binaries/envs dependencies --- dev.py | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/dev.py b/dev.py index 37513ddcf2..de329f5cd9 100755 --- a/dev.py +++ b/dev.py @@ -18,14 +18,44 @@ """ import argparse +import functools import os import pipes +import shutil import subprocess import sys import dotenv +def depend_on(binaries=[], envs=[]): + def decorator_depend_on(func): + @functools.wraps(func) + def wrapper_depend_on(*args, **kwargs): + missing_binaries = [] + for binary in binaries: + if shutil.which(binary) is None: + missing_binaries.append(binary) + missing_envs = [] + for env in envs: + if os.environ.get(env) is None: + missing_envs.append(env) + + if missing_binaries or missing_envs: + print('Exiting due to missing dependencies for "%s"' % func.__name__) + print("- Binaries: %s" % missing_binaries) + print("- Env vars: %s" % missing_envs) + + sys.exit(1) + else: + return func(*args, **kwargs) + + return wrapper_depend_on + + return decorator_depend_on + + +@depend_on(binaries=["kind"], envs=["KUBECONFIG"]) def kind_start(force): # check if there is a cluster existing already # then delete it @@ -38,11 +68,13 @@ def kind_start(force): pass +@depend_on(binaries=["kind"], envs=["KUBECONFIG"]) def kind_stop(): # delete the kind cluster pass +@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG"]) def upgrade(): # consider commit-range # run chartpress @@ -51,22 +83,26 @@ def upgrade(): # (?) 
port-forward pass -# req: kubectl, kubeconfig, running cluster, + +@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG"]) def test(): # pytest pass +@depend_on(binaries=["kubectl", "kubeval", ], envs=[]) def check_templates(): # lint-and-validate script pass +@depend_on(binaries=["black"], envs=[]) def check_python_code(apply): # black pass +@depend_on(binaries=[], envs=["GITHUB_ACCESS_TOKEN"]) def changelog(): # req: GITHUB_ACCESS_TOKEN From 523a3436432c3176bdc511cb22d8109ca244c348 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:05:09 +0200 Subject: [PATCH 32/77] Bulk work on the Python ./dev script --- .travis.yml | 2 +- ci/common | 8 +- ci/start-k8s | 6 +- ci/upgrade | 4 +- dev.py | 426 +++++++++++++++++++++++++++++++++++++----- jupyterhub/Chart.yaml | 2 +- tests/test_spawn.py | 8 +- 7 files changed, 396 insertions(+), 60 deletions(-) diff --git a/.travis.yml b/.travis.yml index b1367cf2e0..29e122b98c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -66,7 +66,7 @@ jobs: script: - setup_helm - setup_kubeval - - python3 tools/templates/lint-and-validate.py --kubernetes-versions $LINT_KUBE_VERSIONS + - python3 tools/templates/lint-and-validate.py --kubernetes-versions $VALIDATE_KUBE_VERSIONS env: [] - stage: publish script: diff --git a/ci/common b/ci/common index cf006f75ba..9cf21ac8d9 100755 --- a/ci/common +++ b/ci/common @@ -32,13 +32,13 @@ if [ -z ${KUBEVAL_VERSION:-} ]; then export KUBEVAL_VERSION=0.14.0 fi -## Valid versions to list under LINT_KUBE_VERSIONS are those in the +## Valid versions to list under VALIDATE_KUBE_VERSIONS are those in the ## kubernetes-json-schema repository, used by kubeval. ## ## ref: https://github.com/instrumenta/kubernetes-json-schema ## -if [ -z ${LINT_KUBE_VERSIONS:-} ]; then - export LINT_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0 +if [ -z ${VALIDATE_KUBE_VERSIONS:-} ]; then + export VALIDATE_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0 fi ## NOTE: The setup_... functions cache downloads but ensure the correct version @@ -96,7 +96,7 @@ setup_git_crypt () { } if [ "$1" = "ci" ]; then - export KIND_CLUSTER=jh-ci-${KUBE_VERSION} + export KIND_CLUSTER=jh-dev-${KUBE_VERSION} export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} else setup_kubectl diff --git a/ci/start-k8s b/ci/start-k8s index 185d53c4b7..fa5678369e 100755 --- a/ci/start-k8s +++ b/ci/start-k8s @@ -7,13 +7,13 @@ if [ "${KIND_CLUSTER:-}" == "" ]; then echo "Run \". ./dev init\" first!" exit 1 elif [ "${KIND_CLUSTER:-}" != "dev" ]; then - if [ "${KUBECONFIG:-}" != "$(kind get kubeconfig-path --name="jh-ci-${KUBE_VERSION:-}")" ]; then + if [ "${KUBECONFIG:-}" != "$(kind get kubeconfig-path --name="jh-dev-${KUBE_VERSION:-}")" ]; then echo "Assertion error: KUBECONFIG out of sync with KUBE_VERSION" echo "KUBECONFIG=${KUBECONFIG:-}" echo "KUBE_VERSION=${KUBE_VERSION:-}" echo "Run \". ./ci/common\" to update your KUBECONFIG environment variable based on your KUBE_VERSION variable." 
exit 1 - elif [ "${KIND_CLUSTER:-}" != "jh-ci-${KUBE_VERSION:-}" ]; then + elif [ "${KIND_CLUSTER:-}" != "jh-dev-${KUBE_VERSION:-}" ]; then echo "Assertion error: KIND_CLUSTER out of sync with KUBE_VERSION" echo "KIND_CLUSTER=${KIND_CLUSTER:-}" echo "KUBE_VERSION=${KUBE_VERSION:-}" @@ -30,7 +30,7 @@ fi echo "starting kind k8s cluster: ${KIND_CLUSTER}" kind create cluster --name=${KIND_CLUSTER} --image="kindest/node:v${KUBE_VERSION}" --config ci/kind-config.yaml -kubectl config set-context --current --namespace jh-ci +kubectl config set-context --current --namespace jh-dev kubectl get nodes # To test network policies, we need a custom CNI like Calico. We have disabled diff --git a/ci/upgrade b/ci/upgrade index a5073e775a..c756c826ed 100755 --- a/ci/upgrade +++ b/ci/upgrade @@ -23,9 +23,9 @@ echo "load the images the kind cluster" python3 ci/kind-load-docker-images.py --kind-cluster $KIND_CLUSTER --values ./jupyterhub/values.yaml echo "install our deployment" -helm upgrade jh-ci ./jupyterhub \ +helm upgrade jh-dev ./jupyterhub \ --install \ - --namespace jh-ci \ + --namespace jh-dev \ --values dev-config.yaml \ --wait diff --git a/dev.py b/dev.py index de329f5cd9..78b3ec11e1 100755 --- a/dev.py +++ b/dev.py @@ -21,14 +21,22 @@ import functools import os import pipes +import re import shutil import subprocess import sys +import textwrap import dotenv +import colorama +colorama.init() def depend_on(binaries=[], envs=[]): + """ + A decorator to ensure the function is called with the relevant binaries + available and relevant environment variables set. + """ def decorator_depend_on(func): @functools.wraps(func) def wrapper_depend_on(*args, **kwargs): @@ -38,14 +46,18 @@ def wrapper_depend_on(*args, **kwargs): missing_binaries.append(binary) missing_envs = [] for env in envs: - if os.environ.get(env) is None: + if not os.environ.get(env): missing_envs.append(env) if missing_binaries or missing_envs: print('Exiting due to missing dependencies for "%s"' % func.__name__) print("- Binaries: %s" % missing_binaries) print("- Env vars: %s" % missing_envs) - + print("") + if missing_binaries: + print("Install and make the binaries available on your PATH!") + if missing_envs: + print("Update your .env file!") sys.exit(1) else: return func(*args, **kwargs) @@ -55,73 +67,359 @@ def wrapper_depend_on(*args, **kwargs): return decorator_depend_on -@depend_on(binaries=["kind"], envs=["KUBECONFIG"]) -def kind_start(force): - # check if there is a cluster existing already - # then delete it +@depend_on(binaries=["kind"], envs=["KUBE_VERSION"]) +def kind_start(recreate): + # check for a existing jh-dev cluster and conditionally delete it + kind_clusters = _run( + cmd=["kind", "get", "clusters"], + print_command=False, + capture_output=True, + ) + kind_cluster_exist = bool(re.search(r"\bjh-dev\b", kind_clusters)) + if kind_cluster_exist: + print('The kind cluster "jh-dev" exists already.') + if recreate: + _run(["kind", "delete", "cluster", "--name", "jh-dev"]) + else: + sys.exit(1) + # start a new cluster with a fixed name, kubernetes version - # configure a default namespace + print('Creating kind cluster "jh-dev".') + _run([ + "kind", "create", "cluster", + "--name", "jh-dev", + "--image", "kindest/node:v%s" % os.environ["KUBE_VERSION"], + "--config", "ci/kind-config.yaml", + ]) - # install calico - # install helm - pass + kubeconfig_path = _run( + cmd=[ + "kind", "get", "kubeconfig-path", + "--name", "jh-dev", + ], + print_command=False, + capture_output=True, + ) + if os.environ["KUBECONFIG"] != 
kubeconfig_path: + print("Updating your .env file's KUBECONFIG value to \"%s\"" % kubeconfig_path) + dotenv.set_key(".env", "KUBECONFIG", kubeconfig_path) + dotenv.load_dotenv() + + print('Making "jh-dev" the default namespace in the cluster.') + _run([ + "kubectl", "config", "set-context", + "--current", + "--namespace", "jh-dev", + ]) + + + # To test network policies, we need a custom CNI like Calico. We have disabled + # the default CNI through kind-config.yaml and will need to manually install a + # CNI for the nodes to become Ready. + # Setup daemonset/calico-etcd, a prerequisite for calico-node + print("Installing a custom CNI: Calico (async, in cluster)") + _run( + cmd=[ + "kubectl", "apply", + "-f", "https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/etcd.yaml", + ], + print_end="", + ) + # NOTE: A toleration to schedule on a node that isn't ready is missing, but + # this pod will be part of making sure the node can become ready. + # + # toleration: + # - key: node.kubernetes.io/not-ready + # effect: NoSchedule + _run( + cmd=[ + "kubectl", "patch", "daemonset/calico-etcd", + "--namespace", "kube-system", + "--type", "json", + "--patch", '[{"op":"add", "path":"/spec/template/spec/tolerations/-", "value":{"key":"node.kubernetes.io/not-ready", "effect":"NoSchedule"}}]', + ], + print_end="", + ) + # Setup daemonset/calico-node, that will allow nodes to enter a ready state + _run( + cmd=[ + "kubectl", "apply", + "-f", "https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/calico.yaml", + ], + print_end="", + ) + # NOTE: Connection details to daemonset/calico-etcd is missing so we need to + # manually add them. + calico_etcd_endpoint = _run( + cmd=[ + "kubectl", "get", "service/calico-etcd", + "--namespace", "kube-system", + "--output", "jsonpath=http://{.spec.clusterIP}:{.spec.ports[0].port}", + ], + print_command=False, + capture_output=True, + ) + _run( + cmd=[ + "kubectl", "patch", "configmap/calico-config", + "--namespace", "kube-system", + "--type", "merge", + "--patch", '{"data":{"etcd_endpoints":"%s"}}' % calico_etcd_endpoint, + ], + print_end="", + ) + # NOTE: daemonset/calico-node pods' main container fails to start up without + # an additional environment variable configured to disable a check + # that we fail. + # + # env: + # - name: FELIX_IGNORELOOSERPF + # value: "true" + _run( + cmd=[ + "kubectl", "patch", "daemonset/calico-node", + "--namespace", "kube-system", + "--type", "json", + "--patch", '[{"op":"add", "path":"/spec/template/spec/containers/0/env/-", "value":{"name":"FELIX_IGNORELOOSERPF", "value":"true"}}]', + ], + ) -@depend_on(binaries=["kind"], envs=["KUBECONFIG"]) + print("Waiting for Kubernetes nodes to become ready.") + _run( + # NOTE: kubectl wait has a bug relating to using the --all flag in 1.13 + # at least Due to this, we wait only for the kind-control-plane + # node, which currently is the only node we start with kind but + # could be configured in kind-config.yaml. 
+ # + # ref: https://github.com/kubernetes/kubernetes/pull/71746 + cmd=[ + "kubectl", "wait", "node/jh-dev-control-plane", + "--for", "condition=ready", + "--timeout", "2m", + ], + error_callback=_log_wait_node_timeout, + ) + + print("Installing Helm's tiller asynchronously in the cluster.") + _run( + cmd=[ + "kubectl", "create", "serviceaccount", "tiller", + "--namespace", "kube-system", + ], + print_end="", + ) + _run( + cmd=[ + "kubectl", "create", "clusterrolebinding", "tiller", + "--clusterrole", "cluster-admin", + "--serviceaccount", "kube-system:tiller", + ], + print_end="", + ) + _run([ + "helm", "init", + "--service-account", "tiller", + ]) + + print("Waiting for Helm's tiller to become ready in the cluster.") + _run( + cmd=[ + "kubectl", "rollout", "status", "deployment/tiller-deploy", + "--namespace", "kube-system", + "--timeout", "2m", + ], + error_callback=_log_tiller_rollout_timeout, + ) + + print('Kind cluster "jh-dev" successfully setup!') + + +@depend_on(binaries=["kind"], envs=[]) def kind_stop(): - # delete the kind cluster - pass + print('Deleting kind cluster "jh-dev".') + _run(["kind", "delete", "cluster", "--name", "jh-dev"]) + +@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "CHARTPRESS_COMMIT_RANGE"]) +def upgrade(values): + print("Building images and updating image tags if needed.") + commit_range = os.environ.get( + "TRAVIS_COMMIT_RANGE", + os.environ["CHARTPRESS_COMMIT_RANGE"] + ) + _run([ + "chartpress", + "--commit-range", commit_range, + ]) + # git --no-pager diff + + if "kind-config-jh-dev" in os.environ["KUBECONFIG"]: + print("Loading the locally built images into the kind cluster.") + cmd = [ + "python3", "ci/kind-load-docker-images.py", + "--kind-cluster", "jh-dev", + ] + for value in values: + cmd.append("--values") + cmd.append(value) + _run(cmd=cmd) + + + print("Installing/upgrading the Helm chart on the Kubernetes cluster.") + _run([ + "helm", "upgrade", "jh-dev", "./jupyterhub", + "--install", + "--namespace", "jh-dev", + "--values", "dev-config.yaml", + "--wait", + ]) + + print("Waiting for the proxy and hub to become ready.") + _run( + cmd=[ + "kubectl", "rollout", "status", "deployment/proxy", + "--timeout", "1m", + ], + print_end="" + ) + _run([ + "kubectl", "rollout", "status", "deployment/hub", + "--timeout", "1m", + ]) -@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG"]) -def upgrade(): - # consider commit-range - # run chartpress - # (conditionally) load images to a kind cluster - # helm upgrade / install with dev-config - # (?) 
port-forward - pass + # FIXME: we don't do any port-forwarding @depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG"]) def test(): - # pytest - pass + _run(["pytest", "-v", "--exitfirst", "./tests"]) @depend_on(binaries=["kubectl", "kubeval", ], envs=[]) def check_templates(): - # lint-and-validate script - pass + kubernetes_versions = None + kubernetes_versions = kubernetes_versions or os.environ.get("VALIDATE_KUBE_VERSIONS", None) + kubernetes_versions = kubernetes_versions or os.environ.get("KUBE_VERSION", None) + + _run([ + "python3", "tools/templates/lint-and-validate.py", + "--kubernetes-versions", kubernetes_versions, + ]) @depend_on(binaries=["black"], envs=[]) def check_python_code(apply): - # black - pass + raise NotImplementedError() + # invoke black @depend_on(binaries=[], envs=["GITHUB_ACCESS_TOKEN"]) def changelog(): - # req: GITHUB_ACCESS_TOKEN + raise NotImplementedError() + # invoke gitlab-activity + + +def _log_tiller_rollout_timeout(): + print("Helm's tiller never became ready!") + _run( + cmd=["kubectl", "describe", "nodes",], + exit_on_error=False, + print_end="", + ) + _run( + cmd=[ + "kubectl", "describe", "deployment/tiller", + "--namespace", "kube-system", + ], + exit_on_error=False, + print_end="", + ) + _run( + cmd=[ + "kubectl", "logs", "deployment/tiller", + "--namespace", "kube-system", + ], + exit_on_error=False, + ) - # gitlab-activity - pass +def _log_wait_node_timeout(): + print("Kubernetes nodes never became ready") + _run( + cmd=["kubectl", "describe", "nodes",], + exit_on_error=False, + print_end="", + ) + _run( + cmd=[ + "kubectl", "describe", "calico-etcd", + "--namespace", "kube-system", + ], + exit_on_error=False, + print_end="", + ) + _run( + cmd=[ + "kubectl", "logs", "calico-etcd", + "--namespace", "kube-system", + ], + exit_on_error=False, + print_end="", + ) + _run( + cmd=[ + "kubectl", "describe", "calico-node", + "--namespace", "kube-system", + ], + exit_on_error=False, + print_end="", + ) + _run( + cmd=[ + "kubectl", "logs", "calico-node", + "--namespace", "kube-system", + ], + exit_on_error=False, + ) -def _check_output(cmd, **kwargs): + +def _print_command(text): + print( + colorama.Style.BRIGHT + + "$ " + + colorama.Fore.GREEN + + text + + colorama.Style.RESET_ALL + + colorama.Fore.WHITE + + colorama.Style.DIM + ) + +def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs): """Run a subcommand and exit if it fails""" - try: - return subprocess.check_output(cmd, **kwargs) - except subprocess.CalledProcessError as e: + if kwargs.get("capture_output", None): + if kwargs.get("text", None) is None: + kwargs["text"] = True + + if print_command: + _print_command(" ".join(map(pipes.quote, cmd))) + completed_process = subprocess.run(cmd, **kwargs) + if print_command: + print(colorama.Style.RESET_ALL, end=print_end) + + if completed_process.returncode != 0: print( - "`{}` exited with status {}".format( - " ".join(map(pipes.quote, cmd)), e.returncode - ), + "`{}` errored ({})".format(" ".join(map(pipes.quote, cmd)), e.returncode), file=sys.stderr, ) - sys.exit(e.returncode) + if error_callback: + error_callback(cmd) + if exit_on_error: + sys.exit(e.returncode) + + if completed_process.stdout: + return completed_process.stdout.strip() def _get_argparser(): @@ -138,8 +436,7 @@ def _get_argparser(): "start", help="Start and initialize a kind Kubernetes cluster." 
) kind_start.add_argument( - "-f", - "--force", + "--recreate", action="store_true", help="If the cluster is already started, delete it and start a new.", ) @@ -150,6 +447,13 @@ def _get_argparser(): upgrade = _cmds.add_parser( "upgrade", help="Install or upgrade the Helm chart in the Kubernetes cluster." ) + upgrade.add_argument( + "-f", + "--values", + action="append", + default=["dev-config.yaml"], + help="A Helm values file, this argument can be passed multiple times.", + ) test = _cmds.add_parser( "test", help="Run tests on the deployed Helm chart in the Kubernetes cluster." @@ -185,21 +489,53 @@ def _get_argparser(): argparser = _get_argparser() args = argparser.parse_args() - # DEBUGGING: - print(args) + # initialize defaults and load environment variables from the .env file + if not os.path.exists(".env"): + default_dotenv_file = textwrap.dedent( + """\ + ## Environment variables loaded and used by the ./dev script. + # + ## GITHUB_ACCESS_TOKEN is needed to generate changelog entries etc. + ## + GITHUB_ACCESS_TOKEN= + # + ## CHARTPRESS_COMMIT_RANGE can help us avoids image rebuilds. If + ## the main repo remote isn't named origin, correct it here. + ## + CHARTPRESS_COMMIT_RANGE=origin/master..HEAD + # + ## KUBECONFIG is required to be set explicitly in order to avoid + ## potential modifications of non developer clusters. It should + ## be to the path where the kubernetes config resides. + ## + KUBECONFIG= + # + ## KUBE_VERSION is used to create a kind cluster and as a fallback + ## if you have not specified VALIDATE_KUBE_VERSIONS. + ## + KUBE_VERSION=1.15.3 + # + ## VALIDATE_KUBE_VERSIONS is used when you check your Helm + ## templates. Are the generated Kubernetes resources valid + ## resources for these Kubernetes versions? + ## + # VALIDATE_KUBE_VERSIONS=1.14.0,1.15.0 + """ + ) + with open('.env', 'w+') as f: + f.write(default_dotenv_file) - # load environment variables from the .env file dotenv.load_dotenv() # run suitable command and pass arguments if args.cmd == "kind": if args.sub_cmd == "start": - kind_start(force=args.force) + kind_start(recreate=args.recreate) if args.sub_cmd == "stop": kind_stop() if args.cmd == "upgrade": - upgrade() + upgrade(args.values) if args.cmd == "test": test() diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index 21ccf4d67d..77221f13ab 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -1,5 +1,5 @@ name: jupyterhub -version: 0.9-dev +version: '0.9-dev' appVersion: 1.0.1dev description: Multi-user Jupyter installation home: https://z2jh.jupyter.org diff --git a/tests/test_spawn.py b/tests/test_spawn.py index f163085541..05095c7eb8 100644 --- a/tests/test_spawn.py +++ b/tests/test_spawn.py @@ -59,9 +59,9 @@ def test_hub_api_create_user_and_get_information_about_user(api_request, jupyter [I 2019-09-25 12:03:12.126 JupyterHub log:174] 201 POST /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 20.74ms [I 2019-09-25 12:03:12.153 JupyterHub log:174] 200 GET /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 11.91ms [D 2019-09-25 12:03:12.180 JupyterHub user:240] Creating for testuser-7c70eb90-035b-4d9f-92a5-482e441e307d: - [I 2019-09-25 12:03:12.204 JupyterHub reflector:199] watching for pods with label selector='component=singleuser-server' in namespace jh-ci + [I 2019-09-25 12:03:12.204 JupyterHub reflector:199] watching for pods with label selector='component=singleuser-server' in namespace jh-dev [D 2019-09-25 12:03:12.205 JupyterHub reflector:202] Connecting pods 
watcher - [I 2019-09-25 12:03:12.229 JupyterHub reflector:199] watching for events with field selector='involvedObject.kind=Pod' in namespace jh-ci + [I 2019-09-25 12:03:12.229 JupyterHub reflector:199] watching for events with field selector='involvedObject.kind=Pod' in namespace jh-dev [D 2019-09-25 12:03:12.229 JupyterHub reflector:202] Connecting events watcher [I 2019-09-25 12:03:12.269 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 98.85ms """ @@ -140,7 +140,7 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): c = subprocess.run( [ "kubectl", - "--namespace=jh-ci", + "--namespace=jh-dev", "exec", pod_name, "--", @@ -156,7 +156,7 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): c = subprocess.run( [ "kubectl", - "--namespace=jh-ci", + "--namespace=jh-dev", "exec", pod_name, "--", From ff689e8a14391d4078f6409bc961392d9f629f19 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:21:44 +0200 Subject: [PATCH 33/77] start-k8s / lint-and-validate with ./dev.py --- .travis.yml | 4 ++-- dev.py | 14 +++++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 29e122b98c..f930ff8b89 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,7 +36,7 @@ script: - setup_kind - setup_helm - ./ci/travis-docker-fix - - ./ci/start-k8s + - ./dev.py check templates - ./ci/upgrade - ./ci/test env: @@ -66,7 +66,7 @@ jobs: script: - setup_helm - setup_kubeval - - python3 tools/templates/lint-and-validate.py --kubernetes-versions $VALIDATE_KUBE_VERSIONS + - ./dev.py check templates env: [] - stage: publish script: diff --git a/dev.py b/dev.py index 78b3ec11e1..a6aa245d8c 100755 --- a/dev.py +++ b/dev.py @@ -105,7 +105,7 @@ def kind_start(recreate): if os.environ["KUBECONFIG"] != kubeconfig_path: print("Updating your .env file's KUBECONFIG value to \"%s\"" % kubeconfig_path) dotenv.set_key(".env", "KUBECONFIG", kubeconfig_path) - dotenv.load_dotenv() + os.environ["KUBECONFIG"] = kubeconfig_path print('Making "jh-dev" the default namespace in the cluster.') _run([ @@ -493,7 +493,8 @@ def _get_argparser(): if not os.path.exists(".env"): default_dotenv_file = textwrap.dedent( """\ - ## Environment variables loaded and used by the ./dev script. + ## Environment variables loaded and used by the ./dev script. They + ## will take precedence over system variables. # ## GITHUB_ACCESS_TOKEN is needed to generate changelog entries etc. ## @@ -511,9 +512,12 @@ def _get_argparser(): KUBECONFIG= # ## KUBE_VERSION is used to create a kind cluster and as a fallback - ## if you have not specified VALIDATE_KUBE_VERSIONS. + ## if you have not specified VALIDATE_KUBE_VERSIONS. Note that only + ## versions that are found on kindest/node can be used. ## - KUBE_VERSION=1.15.3 + ## ref: https://hub.docker.com/r/kindest/node/tags + ## + # KUBE_VERSION=1.15.3 # ## VALIDATE_KUBE_VERSIONS is used when you check your Helm ## templates. 
Are the generated Kubernetes resources valid @@ -525,7 +529,7 @@ def _get_argparser(): with open('.env', 'w+') as f: f.write(default_dotenv_file) - dotenv.load_dotenv() + dotenv.load_dotenv(override=True) # run suitable command and pass arguments if args.cmd == "kind": From 25b341ad91e04babd80361c575e43b965c5a8e58 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:26:27 +0200 Subject: [PATCH 34/77] Add colorama and python-dotenv --- dev-requirements.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dev-requirements.txt b/dev-requirements.txt index 3048a4b575..d81d6fc874 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -20,3 +20,7 @@ requests ## yamllint is used by the tools/templates/lint-and-validate.py ## yamllint>=1.17.0 + +## ./dev.py use these +colorama +python-dotenv From 81fe82e36c4dcf14fc61799e34ab033f07bf2352 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:36:41 +0200 Subject: [PATCH 35/77] Add missed dependencies --- dev.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev.py b/dev.py index a6aa245d8c..95e2e92545 100755 --- a/dev.py +++ b/dev.py @@ -297,7 +297,7 @@ def test(): _run(["pytest", "-v", "--exitfirst", "./tests"]) -@depend_on(binaries=["kubectl", "kubeval", ], envs=[]) +@depend_on(binaries=["kubectl", "helm", "yamllint", "kubeval"], envs=[]) def check_templates(): kubernetes_versions = None kubernetes_versions = kubernetes_versions or os.environ.get("VALIDATE_KUBE_VERSIONS", None) From aa1760b25b20f9a6c421bb81cec8d70c74d332cb Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:40:02 +0200 Subject: [PATCH 36/77] Support Python 3.6 --- dev.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/dev.py b/dev.py index 95e2e92545..8cbb38f1c4 100755 --- a/dev.py +++ b/dev.py @@ -426,12 +426,14 @@ def _get_argparser(): _ = argparse.ArgumentParser( description="Local development help for jupyterhub/zero-to-jupyterhub-k8s" ) - _cmds = _.add_subparsers(title="Commands", dest="cmd", required=True) + + _cmds = _.add_subparsers(title="Commands", dest="cmd") kind = _cmds.add_parser( "kind", help="Kubernetes-in-Docker (kind) cluster management." ) - kind_cmds = kind.add_subparsers(title="Commands", dest="sub_cmd", required=True) + + kind_cmds = kind.add_subparsers(title="Commands", dest="sub_cmd") kind_start = kind_cmds.add_parser( "start", help="Start and initialize a kind Kubernetes cluster." ) @@ -462,7 +464,7 @@ def _get_argparser(): check = _cmds.add_parser( "check", help="Run checks on your developed helm templates and python code." 
) - check_cmds = check.add_subparsers(title="Commands", dest="sub_cmd", required=True) + check_cmds = check.add_subparsers(title="Commands", dest="sub_cmd") check_templates = check_cmds.add_parser( "templates", help="Run checks on the Helm templates and the Kubernetes resources they generate using: helm lint, helm templates, yamllint, and kubeval.", From 810c70be9afb037eb0dba204ab3408ebf342617e Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:46:37 +0200 Subject: [PATCH 37/77] Lint/Validate does not require kubectl --- dev.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev.py b/dev.py index 8cbb38f1c4..edbc8ecf90 100755 --- a/dev.py +++ b/dev.py @@ -297,7 +297,7 @@ def test(): _run(["pytest", "-v", "--exitfirst", "./tests"]) -@depend_on(binaries=["kubectl", "helm", "yamllint", "kubeval"], envs=[]) +@depend_on(binaries=["helm", "yamllint", "kubeval"], envs=[]) def check_templates(): kubernetes_versions = None kubernetes_versions = kubernetes_versions or os.environ.get("VALIDATE_KUBE_VERSIONS", None) From 5f0627a7d19a5951f8fb5fccf1428f30ba6f0375 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 00:55:51 +0200 Subject: [PATCH 38/77] Fail early in TravisCI --- .travis.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index f930ff8b89..3bde26e03a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,6 +27,7 @@ stages: branch in (master) AND type in (push) install: + - set -e - pip3 install --no-cache-dir -r dev-requirements.txt - . ci/common ci @@ -36,9 +37,10 @@ script: - setup_kind - setup_helm - ./ci/travis-docker-fix - - ./dev.py check templates - - ./ci/upgrade - - ./ci/test + - ./dev.py kind start + - ./dev.py upgrade + - kubectl port-forward svc/proxy-public 8080:80 > /dev/null 2>&1 & + - ./dev.py test env: ## NOTE: The environment variables will be expanded to multiple jobs. For ## additional individual jobs, only the first entry is used. From 76745596bc4fad1c8800f6f47dd233127696d5ae Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 01:08:18 +0200 Subject: [PATCH 39/77] More Python 3.6 compability --- dev.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dev.py b/dev.py index edbc8ecf90..f1cce48be3 100755 --- a/dev.py +++ b/dev.py @@ -402,6 +402,11 @@ def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callba if kwargs.get("text", None) is None: kwargs["text"] = True + # FIXME: This is a workaround for Python 3.6 that won't be required in + # Python 3.7. + del kwargs["capture_output"] + kwargs["stdout"] = kwargs["stderr"] = subprocess.PIPE + if print_command: _print_command(" ".join(map(pipes.quote, cmd))) completed_process = subprocess.run(cmd, **kwargs) From 1ddda02ab3df5e3bb9b9c6e0ea307adea69d2da2 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 01:17:37 +0200 Subject: [PATCH 40/77] Even more Python 3.6 compability --- dev.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/dev.py b/dev.py index f1cce48be3..9932ef24b7 100755 --- a/dev.py +++ b/dev.py @@ -399,9 +399,6 @@ def _print_command(text): def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs): """Run a subcommand and exit if it fails""" if kwargs.get("capture_output", None): - if kwargs.get("text", None) is None: - kwargs["text"] = True - # FIXME: This is a workaround for Python 3.6 that won't be required in # Python 3.7. 
del kwargs["capture_output"] @@ -424,7 +421,7 @@ def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callba sys.exit(e.returncode) if completed_process.stdout: - return completed_process.stdout.strip() + return completed_process.stdout.decode("utf-8").strip() def _get_argparser(): From fea69df82ff17cc4512753ed47e0a820d186cfdc Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 01:31:54 +0200 Subject: [PATCH 41/77] Even more and more Python 3.6 compability --- dev.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dev.py b/dev.py index 9932ef24b7..66cf55cb7f 100755 --- a/dev.py +++ b/dev.py @@ -75,6 +75,8 @@ def kind_start(recreate): print_command=False, capture_output=True, ) + print(kind_clusters) + tmp = re.search(r"\bjh-dev\b", kind_clusters) kind_cluster_exist = bool(re.search(r"\bjh-dev\b", kind_clusters)) if kind_cluster_exist: print('The kind cluster "jh-dev" exists already.') @@ -422,6 +424,8 @@ def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callba if completed_process.stdout: return completed_process.stdout.decode("utf-8").strip() + elif kwargs.get("stdout", None): + return "" def _get_argparser(): From 1c7a57049d0002466e0f2894502540e1701460ee Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 02:08:32 +0200 Subject: [PATCH 42/77] Bugfix ./dev.py upgrade --- dev.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dev.py b/dev.py index 66cf55cb7f..253cc29207 100755 --- a/dev.py +++ b/dev.py @@ -259,24 +259,24 @@ def upgrade(values): if "kind-config-jh-dev" in os.environ["KUBECONFIG"]: print("Loading the locally built images into the kind cluster.") - cmd = [ + _run([ "python3", "ci/kind-load-docker-images.py", "--kind-cluster", "jh-dev", - ] - for value in values: - cmd.append("--values") - cmd.append(value) - _run(cmd=cmd) + ]) print("Installing/upgrading the Helm chart on the Kubernetes cluster.") - _run([ + cmd = [ "helm", "upgrade", "jh-dev", "./jupyterhub", "--install", "--namespace", "jh-dev", "--values", "dev-config.yaml", "--wait", - ]) + ] + for value in values: + cmd.append("--values") + cmd.append(value) + _run(cmd) print("Waiting for the proxy and hub to become ready.") _run( From d44095c2fe6f0e8eceb7985aba1845aaa7aefe95 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 02:38:10 +0200 Subject: [PATCH 43/77] Fixes... 
--- dev.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/dev.py b/dev.py index 253cc29207..d112ef4527 100755 --- a/dev.py +++ b/dev.py @@ -270,25 +270,23 @@ def upgrade(values): "helm", "upgrade", "jh-dev", "./jupyterhub", "--install", "--namespace", "jh-dev", - "--values", "dev-config.yaml", - "--wait", ] for value in values: cmd.append("--values") cmd.append(value) - _run(cmd) + _run(cmd=cmd) print("Waiting for the proxy and hub to become ready.") _run( cmd=[ "kubectl", "rollout", "status", "deployment/proxy", - "--timeout", "1m", + "--timeout", "2m", ], print_end="" ) _run([ "kubectl", "rollout", "status", "deployment/hub", - "--timeout", "1m", + "--timeout", "2m", ]) # FIXME: we don't do any port-forwarding @@ -414,13 +412,15 @@ def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callba if completed_process.returncode != 0: print( - "`{}` errored ({})".format(" ".join(map(pipes.quote, cmd)), e.returncode), + "`{}` errored ({})".format(" ".join(map(pipes.quote, cmd)), completed_process.returncode), file=sys.stderr, ) + if completed_process.stderr: + print(completed_process.stderr.decode("utf-8").strip()) if error_callback: error_callback(cmd) if exit_on_error: - sys.exit(e.returncode) + sys.exit(completed_process.returncode) if completed_process.stdout: return completed_process.stdout.decode("utf-8").strip() From 60e7b31128a948e905470e1eac8852131323498c Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 08:12:46 +0200 Subject: [PATCH 44/77] Attempt on port-forwarding from Python script --- .travis.yml | 1 - dev.py | 37 +++++++++++++++++++++++++++---------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3bde26e03a..049a76642d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,7 +39,6 @@ script: - ./ci/travis-docker-fix - ./dev.py kind start - ./dev.py upgrade - - kubectl port-forward svc/proxy-public 8080:80 > /dev/null 2>&1 & - ./dev.py test env: ## NOTE: The environment variables will be expanded to multiple jobs. 
For diff --git a/dev.py b/dev.py index d112ef4527..0359a0e68f 100755 --- a/dev.py +++ b/dev.py @@ -289,7 +289,11 @@ def upgrade(values): "--timeout", "2m", ]) - # FIXME: we don't do any port-forwarding + print("Run and forget about port-forwarding.") + _run( + cmd=["kubectl", "port-forward", "service/proxy-public", "8080:80"], + forget=True, + ) @depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG"]) @@ -396,7 +400,7 @@ def _print_command(text): colorama.Style.DIM ) -def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs): +def _run(cmd, forget=False, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs): """Run a subcommand and exit if it fails""" if kwargs.get("capture_output", None): # FIXME: This is a workaround for Python 3.6 that won't be required in @@ -406,24 +410,37 @@ def _run(cmd, print_command=True, print_end="\n", print_error=True, error_callba if print_command: _print_command(" ".join(map(pipes.quote, cmd))) - completed_process = subprocess.run(cmd, **kwargs) + if forget: + # Call and forget this process + with open(os.devnull, 'r+b', 0) as DEVNULL: + proc = subprocess.Popen( + cmd, + stdin=DEVNULL, + stdout=DEVNULL, + stderr=DEVNULL, + close_fds=True, + ) + return + else: + # This call will await completion + proc = subprocess.run(cmd, **kwargs) if print_command: print(colorama.Style.RESET_ALL, end=print_end) - if completed_process.returncode != 0: + if proc.returncode != 0: print( - "`{}` errored ({})".format(" ".join(map(pipes.quote, cmd)), completed_process.returncode), + "`{}` errored ({})".format(" ".join(map(pipes.quote, cmd)), proc.returncode), file=sys.stderr, ) - if completed_process.stderr: - print(completed_process.stderr.decode("utf-8").strip()) + if proc.stderr: + print(proc.stderr.decode("utf-8").strip()) if error_callback: error_callback(cmd) if exit_on_error: - sys.exit(completed_process.returncode) + sys.exit(proc.returncode) - if completed_process.stdout: - return completed_process.stdout.decode("utf-8").strip() + if proc.stdout: + return proc.stdout.decode("utf-8").strip() elif kwargs.get("stdout", None): return "" From a8177d26dcd6bd6398304dacc300f0ae2f79721c Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 09:37:34 +0200 Subject: [PATCH 45/77] Kube 1.16.1 etc. --- .travis.yml | 8 ++++---- ci/common | 29 +++++++---------------------- 2 files changed, 11 insertions(+), 26 deletions(-) diff --git a/.travis.yml b/.travis.yml index 049a76642d..4905af6a0e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,7 +29,7 @@ stages: install: - set -e - pip3 install --no-cache-dir -r dev-requirements.txt - - . ci/common ci + - . 
ci/common stage: test script: @@ -53,11 +53,10 @@ env: ## ref: https://hub.docker.com/r/kindest/node/tags ## ref: https://github.com/kubernetes-sigs/kind/issues/197 ## - # - KUBE_VERSION=1.16 + - KUBE_VERSION=1.16.1 - KUBE_VERSION=1.15.3 - KUBE_VERSION=1.14.6 - KUBE_VERSION=1.13.10 - - KUBE_VERSION=1.12.10 jobs: ## include additional individual jobs @@ -68,7 +67,8 @@ jobs: - setup_helm - setup_kubeval - ./dev.py check templates - env: [] + env: + - VALIDATE_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0,1.16.0 - stage: publish script: - setup_helm diff --git a/ci/common b/ci/common index 9cf21ac8d9..949455b987 100755 --- a/ci/common +++ b/ci/common @@ -6,41 +6,32 @@ mkdir -p bin export PATH="$PWD/bin:$PATH" -## NOTE: export HUB_API_URL is required for it to be accessible from pytest -## -export HUB_API_URL=http://127.0.0.1:8080/hub/api - -## NOTE: We need to allow our CI system to override these env. variables +## NOTE: These are default values for relevant environment variables ## if [ -z ${KUBE_VERSION:-} ]; then ## NOTE: KUBE_VERSION is limited by the available kindest/node images ## ## ref: https://hub.docker.com/r/kindest/node/tags ## ref: https://github.com/kubernetes/kubernetes/releases - export KUBE_VERSION=1.13.10 + ## + export KUBE_VERSION=1.15.3 fi if [ -z ${KIND_VERSION:-} ]; then ## ref: https://github.com/kubernetes-sigs/kind/releases + ## export KIND_VERSION=0.5.1 fi if [ -z ${HELM_VERSION:-} ]; then ## ref: https://github.com/helm/helm/releases + ## export HELM_VERSION=2.14.3 fi if [ -z ${KUBEVAL_VERSION:-} ]; then ## ref: https://github.com/instrumenta/kubeval/releases + ## export KUBEVAL_VERSION=0.14.0 fi -## Valid versions to list under VALIDATE_KUBE_VERSIONS are those in the -## kubernetes-json-schema repository, used by kubeval. -## -## ref: https://github.com/instrumenta/kubernetes-json-schema -## -if [ -z ${VALIDATE_KUBE_VERSIONS:-} ]; then - export VALIDATE_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0 -fi - ## NOTE: The setup_... functions cache downloads but ensure the correct version ## setup_kubectl () { @@ -95,15 +86,9 @@ setup_git_crypt () { cp bin/git-crypt-${GIT_CRYPT_VERSION} bin/git-crypt } -if [ "$1" = "ci" ]; then - export KIND_CLUSTER=jh-dev-${KUBE_VERSION} - export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} -else +if [ "$1" = "--setup" ]; then setup_kubectl setup_kind setup_helm setup_kubeval - - export KIND_CLUSTER=dev - export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} fi From a6a48f43675c09988a6de74b2b7e2a29f311fb39 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 12:06:34 +0200 Subject: [PATCH 46/77] Disable k8s 1.16 until calico fix, etc. --- .travis.yml | 7 ++++++- ci/start-k8s | 4 ++-- dev.py | 20 +++++++++++++++++--- tests/conftest.py | 4 +++- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4905af6a0e..775374e2e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -53,7 +53,12 @@ env: ## ref: https://hub.docker.com/r/kindest/node/tags ## ref: https://github.com/kubernetes-sigs/kind/issues/197 ## - - KUBE_VERSION=1.16.1 + ## NOTE: KUBE_VERSION 1.16.1 is disabled until calcio has updated their + ## calico-etcd.yaml containing an old invalid DaemonSet's apiVersion. 
+ ## + ## ref: https://github.com/projectcalico/calico/issues/2915 + ## + # - KUBE_VERSION=1.16.1 - KUBE_VERSION=1.15.3 - KUBE_VERSION=1.14.6 - KUBE_VERSION=1.13.10 diff --git a/ci/start-k8s b/ci/start-k8s index fa5678369e..7df807d481 100755 --- a/ci/start-k8s +++ b/ci/start-k8s @@ -38,7 +38,7 @@ kubectl get nodes # CNI for the nodes to become Ready. echo "installing a custom CNI: Calico (async, in cluster)" # Setup daemonset/calico-etcd, a prerequisite for calico-node -kubectl apply -f https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/etcd.yaml +kubectl apply -f https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/etcd.yaml # NOTE: A toleration to schedule on a node that isn't ready is missing, but # this pod will be part of making sure the node can become ready. # @@ -49,7 +49,7 @@ kubectl patch -n kube-system daemonset/calico-etcd --type='json' \ -p='[{"op":"add", "path":"/spec/template/spec/tolerations/-", "value":{"key":"node.kubernetes.io/not-ready", "effect":"NoSchedule"}}]' # Setup daemonset/calico-node, that will allow nodes to enter a ready state -curl -sSo ci/daemonset-calico-node.yaml https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/calico.yaml +curl -sSo ci/daemonset-calico-node.yaml https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/calico.yaml # NOTE: Connection details to daemonset/calico-etcd is missing so we need to # manually add them. CALICO_ETCD_IP=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.clusterIP}') diff --git a/dev.py b/dev.py index 0359a0e68f..edc2b34d46 100755 --- a/dev.py +++ b/dev.py @@ -244,7 +244,7 @@ def kind_stop(): _run(["kind", "delete", "cluster", "--name", "jh-dev"]) -@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "CHARTPRESS_COMMIT_RANGE"]) +@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "CHARTPRESS_COMMIT_RANGE", "PROXY_PUBLIC_SERVICE_PORT"]) def upgrade(values): print("Building images and updating image tags if needed.") commit_range = os.environ.get( @@ -291,12 +291,15 @@ def upgrade(values): print("Run and forget about port-forwarding.") _run( - cmd=["kubectl", "port-forward", "service/proxy-public", "8080:80"], + cmd=[ + "kubectl", "port-forward", "service/proxy-public", + "%s:80" % os.environ["PROXY_PUBLIC_SERVICE_PORT"], + ], forget=True, ) -@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG"]) +@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "PROXY_PUBLIC_SERVICE_HOST", "PROXY_PUBLIC_SERVICE_PORT"]) def test(): _run(["pytest", "-v", "--exitfirst", "./tests"]) @@ -534,6 +537,9 @@ def _get_argparser(): ## potential modifications of non developer clusters. It should ## be to the path where the kubernetes config resides. ## + ## The "./dev.py kind start" command will set this files KUBECONFIG + ## entry automatically on cluster creation. + ## KUBECONFIG= # ## KUBE_VERSION is used to create a kind cluster and as a fallback @@ -549,6 +555,14 @@ def _get_argparser(): ## resources for these Kubernetes versions? ## # VALIDATE_KUBE_VERSIONS=1.14.0,1.15.0 + # + ## PROXY_PUBLIC_SERVICE_HOST and PROXY_PUBLIC_SERVICE_PORT allow + ## you to run the tests if you have used kubectl to port forward + ## the proxy-public Kubernetes service manually with a custom + ## port or host ip. 
+ ## + PROXY_PUBLIC_SERVICE_HOST=127.0.0.1 + PROXY_PUBLIC_SERVICE_PORT=8080 """ ) with open('.env', 'w+') as f: diff --git a/tests/conftest.py b/tests/conftest.py index f484f67a2a..e0ecd2d05d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -15,9 +15,11 @@ def request_data(): with open(os.path.join(basedir, "dev-config.yaml")) as f: y = yaml.safe_load(f) token = y["hub"]["services"]["test"]["apiToken"] + host = os.environ.get("ROXY_PUBLIC_SERVICE_HOST", "127.0.0.1"), + port = os.environ.get("PROXY_PUBLIC_SERVICE_PORT", "8080"), return { "token": token, - "hub_url": os.environ["HUB_API_URL"], + "hub_url": "http://%s:%s/hub/api" % (host, port), "headers": {"Authorization": f"token {token}"}, "test_timeout": 300, "request_timeout": 60, From 700d21cf7a6f679784c0b8d16722bbd92d04f336 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Tue, 8 Oct 2019 15:35:31 +0200 Subject: [PATCH 47/77] Fix syntax bugs --- ci/kind-load-docker-images.py | 2 +- dev.py | 14 ++++++-------- tests/conftest.py | 6 +++--- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/ci/kind-load-docker-images.py b/ci/kind-load-docker-images.py index 1d410bbea5..ee17eb1e8a 100755 --- a/ci/kind-load-docker-images.py +++ b/ci/kind-load-docker-images.py @@ -78,7 +78,7 @@ def kind_load_docker_images(kind_cluster, images): continue check_output(["kind", "load", "docker-image", "--name", kind_cluster, image]) - print("### Loaded %s" % image) + print(f"### Loaded {image}") if __name__ == "__main__": diff --git a/dev.py b/dev.py index edc2b34d46..ba91a1feff 100755 --- a/dev.py +++ b/dev.py @@ -50,9 +50,9 @@ def wrapper_depend_on(*args, **kwargs): missing_envs.append(env) if missing_binaries or missing_envs: - print('Exiting due to missing dependencies for "%s"' % func.__name__) - print("- Binaries: %s" % missing_binaries) - print("- Env vars: %s" % missing_envs) + print(f'Exiting due to missing dependencies for "{func.__name__}"') + print(f"- Binaries: {missing_binaries}") + print(f"- Env vars: {missing_envs}") print("") if missing_binaries: print("Install and make the binaries available on your PATH!") @@ -75,8 +75,6 @@ def kind_start(recreate): print_command=False, capture_output=True, ) - print(kind_clusters) - tmp = re.search(r"\bjh-dev\b", kind_clusters) kind_cluster_exist = bool(re.search(r"\bjh-dev\b", kind_clusters)) if kind_cluster_exist: print('The kind cluster "jh-dev" exists already.') @@ -91,7 +89,7 @@ def kind_start(recreate): _run([ "kind", "create", "cluster", "--name", "jh-dev", - "--image", "kindest/node:v%s" % os.environ["KUBE_VERSION"], + "--image", f"kindest/node:v{os.environ['KUBE_VERSION']}", "--config", "ci/kind-config.yaml", ]) @@ -105,7 +103,7 @@ def kind_start(recreate): ) if os.environ["KUBECONFIG"] != kubeconfig_path: - print("Updating your .env file's KUBECONFIG value to \"%s\"" % kubeconfig_path) + print(f'Updating your .env file\'s KUBECONFIG value to "{kubeconfig_path}"') dotenv.set_key(".env", "KUBECONFIG", kubeconfig_path) os.environ["KUBECONFIG"] = kubeconfig_path @@ -293,7 +291,7 @@ def upgrade(values): _run( cmd=[ "kubectl", "port-forward", "service/proxy-public", - "%s:80" % os.environ["PROXY_PUBLIC_SERVICE_PORT"], + f"{os.environ['PROXY_PUBLIC_SERVICE_PORT']}:80", ], forget=True, ) diff --git a/tests/conftest.py b/tests/conftest.py index e0ecd2d05d..e4db1edf6a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -15,11 +15,11 @@ def request_data(): with open(os.path.join(basedir, "dev-config.yaml")) as f: y = yaml.safe_load(f) token = y["hub"]["services"]["test"]["apiToken"] 
-    host = os.environ.get("ROXY_PUBLIC_SERVICE_HOST", "127.0.0.1"),
-    port = os.environ.get("PROXY_PUBLIC_SERVICE_PORT", "8080"),
+    host = os.environ.get("PROXY_PUBLIC_SERVICE_HOST", "127.0.0.1")
+    port = os.environ.get("PROXY_PUBLIC_SERVICE_PORT", "8080")
     return {
         "token": token,
-        "hub_url": "http://%s:%s/hub/api" % (host, port),
+        "hub_url": f'http://{host}:{port}/hub/api',
         "headers": {"Authorization": f"token {token}"},
         "test_timeout": 300,
         "request_timeout": 60,

From 45bff5cfd3e01b655e21f39a40759879972eb105 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Tue, 8 Oct 2019 17:44:50 +0200
Subject: [PATCH 48/77] Deleted replaced bash scripts

---
 ci/start-k8s | 100 ---------------------------------------------------
 ci/test      |  30 ----------------
 ci/upgrade   |  37 -------------------
 dev.py       |  38 ++++++++++++++++++--
 4 files changed, 36 insertions(+), 169 deletions(-)
 delete mode 100755 ci/start-k8s
 delete mode 100755 ci/test
 delete mode 100755 ci/upgrade

diff --git a/ci/start-k8s b/ci/start-k8s
deleted file mode 100755
index 7df807d481..0000000000
--- a/ci/start-k8s
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/bin/bash
-set -eu
-
-## NOTE: This script assumes we have installed kind, but the common script doesn't
-##
-if [ "${KIND_CLUSTER:-}" == "" ]; then
-    echo "Run \". ./dev init\" first!"
-    exit 1
-elif [ "${KIND_CLUSTER:-}" != "dev" ]; then
-    if [ "${KUBECONFIG:-}" != "$(kind get kubeconfig-path --name="jh-dev-${KUBE_VERSION:-}")" ]; then
-        echo "Assertion error: KUBECONFIG out of sync with KUBE_VERSION"
-        echo "KUBECONFIG=${KUBECONFIG:-}"
-        echo "KUBE_VERSION=${KUBE_VERSION:-}"
-        echo "Run \". ./ci/common\" to update your KUBECONFIG environment variable based on your KUBE_VERSION variable."
-        exit 1
-    elif [ "${KIND_CLUSTER:-}" != "jh-dev-${KUBE_VERSION:-}" ]; then
-        echo "Assertion error: KIND_CLUSTER out of sync with KUBE_VERSION"
-        echo "KIND_CLUSTER=${KIND_CLUSTER:-}"
-        echo "KUBE_VERSION=${KUBE_VERSION:-}"
-        echo "Run \". ./ci/common\" to update your KIND_CLUSTER environment variable based on your KUBE_VERSION variable."
-        exit 1
-    fi
-fi
-
-# If the kind k8s cluster for this k8s version is already running, restart it
-if kind get clusters | grep --word-regexp ${KIND_CLUSTER}; then
-    echo "deleting existing kind k8s cluster: ${KIND_CLUSTER}"
-    kind delete cluster --name=${KIND_CLUSTER}
-fi
-
-echo "starting kind k8s cluster: ${KIND_CLUSTER}"
-kind create cluster --name=${KIND_CLUSTER} --image="kindest/node:v${KUBE_VERSION}" --config ci/kind-config.yaml
-kubectl config set-context --current --namespace jh-dev
-kubectl get nodes
-
-# To test network policies, we need a custom CNI like Calico. We have disabled
-# the default CNI through kind-config.yaml and will need to manually install a
-# CNI for the nodes to become Ready.
-echo "installing a custom CNI: Calico (async, in cluster)"
-# Setup daemonset/calico-etcd, a prerequisite for calico-node
-kubectl apply -f https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/etcd.yaml
-# NOTE: A toleration to schedule on a node that isn't ready is missing, but
-# this pod will be part of making sure the node can become ready.
-# -# toleration: -# - key: node.kubernetes.io/not-ready -# effect: NoSchedule -kubectl patch -n kube-system daemonset/calico-etcd --type='json' \ - -p='[{"op":"add", "path":"/spec/template/spec/tolerations/-", "value":{"key":"node.kubernetes.io/not-ready", "effect":"NoSchedule"}}]' - -# Setup daemonset/calico-node, that will allow nodes to enter a ready state -curl -sSo ci/daemonset-calico-node.yaml https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/calico.yaml -# NOTE: Connection details to daemonset/calico-etcd is missing so we need to -# manually add them. -CALICO_ETCD_IP=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.clusterIP}') -CALICO_ETCD_PORT=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.ports[0].port}') -sed -i -e "s/:/$CALICO_ETCD_IP:$CALICO_ETCD_PORT/" ci/daemonset-calico-node.yaml -kubectl apply -f ci/daemonset-calico-node.yaml -# NOTE: daemonset/calico-node pods' main container fails to start up without -# an additional environment variable configured to disable a check -# that we fail. -# -# env: -# - name: FELIX_IGNORELOOSERPF -# value: "true" -kubectl patch -n kube-system daemonset/calico-node --type='json' \ - -p='[{"op":"add", "path":"/spec/template/spec/containers/0/env/-", "value":{"name":"FELIX_IGNORELOOSERPF", "value":"true"}}]' - -echo "waiting for kubernetes nodes (in cluster)" -# NOTE: kubectl wait has a bug relating to using the --all flag in 1.13 at least -# Due to this, we wait only for the kind-control-plane node, which -# currently is the only node we start with kind but could be configured in -# kind-config.yaml. -# -# ref: https://github.com/kubernetes/kubernetes/pull/71746 -kubectl wait node/${KIND_CLUSTER}-control-plane --for condition=ready --timeout 2m || { - r=$? - echo "kubernetes nodes never became ready" - kubectl describe nodes || true - kubectl describe -n kube-system daemonset/calico-etcd || true - kubectl logs -n kube-system daemonset/calico-etcd || true - kubectl describe -n kube-system daemonset/calico-node || true - kubectl logs -n kube-system daemonset/calico-node || true - exit $r -} - -echo "installing tiller (async, in cluster)" -kubectl create serviceaccount tiller -n kube-system -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller - -echo "waiting for tiller (in cluster)" -kubectl rollout status -n kube-system deployment/tiller-deploy --timeout 1m || { - r=$? - echo "tiller never became ready" - kubectl describe nodes || true - kubectl describe -n kube-system deployment/tiller || true - kubectl logs -n kube-system deployment/tiller || true - exit $r -} diff --git a/ci/test b/ci/test deleted file mode 100755 index d7d3993d49..0000000000 --- a/ci/test +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -set -eu - -display_logs() { - echo "***** node *****" - kubectl describe node - echo "***** pods *****" - kubectl get pods - echo "***** events *****" - kubectl get events - echo "***** hub *****" - kubectl logs deploy/hub - echo "***** proxy *****" - kubectl logs deploy/proxy -} - -echo "running tests from outside the cluster:" -echo "- kubectl port-forward has enabled communication with services in the cluster" -## NOTE: -x / --exitfirst makes us avoid noise in the hub and proxy pod logs -## following a failure we are interested in debugging. -## -pytest ./tests -v --exitfirst || { - r=$? 
- echo "a test failed, here is relevant debugging information" - display_logs - exit $r -} - -## If tests succeeded show all pods to see if any were restarted -kubectl get pods diff --git a/ci/upgrade b/ci/upgrade deleted file mode 100755 index c756c826ed..0000000000 --- a/ci/upgrade +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -eu - -## set TRAVIS_COMMIT_RANGE if it is unset on a local CI run -## -## NOTE: Use an open ended range from the upstream or origin master branch to the -## current state including unstaged changes. -## -if [ -z ${TRAVIS_COMMIT_RANGE:-} ]; then - if git remote -v | grep --word-regex upstream; then - GIT_REMOTE=upstream/ - elif git remote -v | grep --word-regex origin; then - GIT_REMOTE=origin/ - fi - export TRAVIS_COMMIT_RANGE=${GIT_REMOTE:-}master.. -fi - -echo "build images and update the default values.yaml to reference them" -chartpress --commit-range ${TRAVIS_COMMIT_RANGE} -git --no-pager diff - -echo "load the images the kind cluster" -python3 ci/kind-load-docker-images.py --kind-cluster $KIND_CLUSTER --values ./jupyterhub/values.yaml - -echo "install our deployment" -helm upgrade jh-dev ./jupyterhub \ - --install \ - --namespace jh-dev \ - --values dev-config.yaml \ - --wait - -echo "waiting for hub and proxy to become responsive" -kubectl rollout status deployment/proxy --timeout 1m -kubectl rollout status deployment/hub --timeout 1m - -echo "couple a localhost port with svc/proxy-public to access JupyterHub API" -kubectl port-forward svc/proxy-public 8080:80 > /dev/null 2>&1 & diff --git a/dev.py b/dev.py index ba91a1feff..64fb9b3795 100755 --- a/dev.py +++ b/dev.py @@ -299,7 +299,13 @@ def upgrade(values): @depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "PROXY_PUBLIC_SERVICE_HOST", "PROXY_PUBLIC_SERVICE_PORT"]) def test(): - _run(["pytest", "-v", "--exitfirst", "./tests"]) + _run( + cmd=["pytest", "-v", "--exitfirst", "./tests"], + error_callback=_log_test_failure, + ) + + print("Tests succeeded!") + _run(cmd=["kubectl", "get", "pods"]) @depend_on(binaries=["helm", "yamllint", "kubeval"], envs=[]) @@ -326,6 +332,34 @@ def changelog(): # invoke gitlab-activity +def _log_test_failure(): + print("A test failed, let's debug!") + _run( + cmd=["kubectl", "describe", "nodes",], + exit_on_error=False, + print_end="", + ) + _run( + cmd=["kubectl", "get", "pods",], + exit_on_error=False, + print_end="", + ) + _run( + cmd=["kubectl", "get", "events",], + exit_on_error=False, + print_end="", + ) + _run( + cmd=["kubectl", "get", "logs", "deploy/hub",], + exit_on_error=False, + print_end="", + ) + _run( + cmd=["kubectl", "get", "logs", "deploy/proxy",], + exit_on_error=False, + ) + + def _log_tiller_rollout_timeout(): print("Helm's tiller never became ready!") _run( @@ -436,7 +470,7 @@ def _run(cmd, forget=False, print_command=True, print_end="\n", print_error=True if proc.stderr: print(proc.stderr.decode("utf-8").strip()) if error_callback: - error_callback(cmd) + error_callback() if exit_on_error: sys.exit(proc.returncode) From 6f28cb86d2eaeb309f6b8844abd87d9cd2b2363a Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Thu, 10 Oct 2019 15:32:35 +0200 Subject: [PATCH 49/77] Add automatic restart of dormant kind clusters --- dev.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/dev.py b/dev.py index 64fb9b3795..60bb6660f7 100755 --- a/dev.py +++ b/dev.py @@ -67,7 +67,7 @@ def wrapper_depend_on(*args, **kwargs): return decorator_depend_on -@depend_on(binaries=["kind"], envs=["KUBE_VERSION"]) 
+@depend_on(binaries=["kind", "docker"], envs=["KUBE_VERSION"]) def kind_start(recreate): # check for a existing jh-dev cluster and conditionally delete it kind_clusters = _run( @@ -77,11 +77,26 @@ def kind_start(recreate): ) kind_cluster_exist = bool(re.search(r"\bjh-dev\b", kind_clusters)) if kind_cluster_exist: - print('The kind cluster "jh-dev" exists already.') if recreate: + print("Deleting existing kind cluster named jh-dev.") _run(["kind", "delete", "cluster", "--name", "jh-dev"]) else: - sys.exit(1) + # This workaround currently only works for single node clusters, + # which is what we currently setup. + # + # ref: https://github.com/kubernetes-sigs/kind/issues/148#issuecomment-504712517 + is_kind_cluster_container_running = _run( + cmd=["docker", "ps", "--quiet", "--filter", "name=jh-dev-control-plane"], + print_command=False, + capture_output=True, + ) + if not is_kind_cluster_container_running: + print("Starting up the existing kind cluster's node container.") + _run(["docker", "start", "jh-dev-control-plane"]) + sys.exit(0) + else: + print("The kind cluster was already started and running.") + sys.exit(0) # start a new cluster with a fixed name, kubernetes version From c61e9215cce7a9b13120f9bf8e3b121012270333 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Thu, 10 Oct 2019 15:38:04 +0200 Subject: [PATCH 50/77] Make the calico install up to date --- dev.py | 61 +--------------------------------------------------------- 1 file changed, 1 insertion(+), 60 deletions(-) diff --git a/dev.py b/dev.py index 60bb6660f7..6730f9f56d 100755 --- a/dev.py +++ b/dev.py @@ -133,73 +133,14 @@ def kind_start(recreate): # To test network policies, we need a custom CNI like Calico. We have disabled # the default CNI through kind-config.yaml and will need to manually install a # CNI for the nodes to become Ready. - # Setup daemonset/calico-etcd, a prerequisite for calico-node print("Installing a custom CNI: Calico (async, in cluster)") _run( cmd=[ "kubectl", "apply", - "-f", "https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/etcd.yaml", + "-f", "https://docs.projectcalico.org/v3.10/manifests/calico.yaml", ], print_end="", ) - # NOTE: A toleration to schedule on a node that isn't ready is missing, but - # this pod will be part of making sure the node can become ready. - # - # toleration: - # - key: node.kubernetes.io/not-ready - # effect: NoSchedule - _run( - cmd=[ - "kubectl", "patch", "daemonset/calico-etcd", - "--namespace", "kube-system", - "--type", "json", - "--patch", '[{"op":"add", "path":"/spec/template/spec/tolerations/-", "value":{"key":"node.kubernetes.io/not-ready", "effect":"NoSchedule"}}]', - ], - print_end="", - ) - # Setup daemonset/calico-node, that will allow nodes to enter a ready state - _run( - cmd=[ - "kubectl", "apply", - "-f", "https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/calico.yaml", - ], - print_end="", - ) - # NOTE: Connection details to daemonset/calico-etcd is missing so we need to - # manually add them. 
- calico_etcd_endpoint = _run( - cmd=[ - "kubectl", "get", "service/calico-etcd", - "--namespace", "kube-system", - "--output", "jsonpath=http://{.spec.clusterIP}:{.spec.ports[0].port}", - ], - print_command=False, - capture_output=True, - ) - _run( - cmd=[ - "kubectl", "patch", "configmap/calico-config", - "--namespace", "kube-system", - "--type", "merge", - "--patch", '{"data":{"etcd_endpoints":"%s"}}' % calico_etcd_endpoint, - ], - print_end="", - ) - # NOTE: daemonset/calico-node pods' main container fails to start up without - # an additional environment variable configured to disable a check - # that we fail. - # - # env: - # - name: FELIX_IGNORELOOSERPF - # value: "true" - _run( - cmd=[ - "kubectl", "patch", "daemonset/calico-node", - "--namespace", "kube-system", - "--type", "json", - "--patch", '[{"op":"add", "path":"/spec/template/spec/containers/0/env/-", "value":{"name":"FELIX_IGNORELOOSERPF", "value":"true"}}]', - ], - ) print("Waiting for Kubernetes nodes to become ready.") _run( From 44f0f87379496fb8ee6c2ebafba4d7c72228cf7f Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Fri, 11 Oct 2019 12:55:27 +0200 Subject: [PATCH 51/77] Remove old bash script (dev) --- dev | 38 -------------------------------------- 1 file changed, 38 deletions(-) delete mode 100755 dev diff --git a/dev b/dev deleted file mode 100755 index a6ac040bc0..0000000000 --- a/dev +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -## dev is a script to help us get started performing typical task during local -## development without needing to learn everything at once -## -## - init -## - start-k8s -## - upgrade -## - lint-and-validate -## - test -## - -## if the script is sourced -if [ "${BASH_SOURCE[0]}" != "${0}" ]; then - if ! [ "$1" = "init" ]; then - echo "Only source the init command, run your command without a leading dot!" - else - if [ "$1" = "init" ]; then - . ./ci/common - pip3 install -r dev-requirements.txt - fi - fi -## else, the script isn't sourced -else - if [ "$1" = "init" ]; then - echo "The init command needs to be sourced, run it with \". ./ci/dev init\"" - else - if [ "$1" = "start-k8s" ]; then - ./ci/start-k8s - elif [ "$1" = "upgrade" ]; then - ./ci/upgrade - elif [ "$1" = "lint-and-validate" ]; then - python3 tools/templates/lint-and-validate.py --kubernetes-versions 1.13.0 - elif [ "$1" = "test" ]; then - ./ci/upgrade && ./ci/test - fi - fi -fi From 9b612ef5e07f18a2d10eb8ffe41a0e7b9cb269f0 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Fri, 11 Oct 2019 12:56:00 +0200 Subject: [PATCH 52/77] Rename dev.py to dev --- dev.py => dev | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename dev.py => dev (100%) diff --git a/dev.py b/dev similarity index 100% rename from dev.py rename to dev From e0cd9a23e6eda82b9497dd07c7ccab8839b0062f Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Sun, 13 Oct 2019 00:05:23 +0200 Subject: [PATCH 53/77] Rewrite of CONTRIBUTING.md etc. 
---
 .travis.yml                                   |   8 +-
 CONTRIBUTING.md                               | 211 ++++++++++++------
 Vagrantfile                                   |   2 +-
 ci/common                                     |  12 +-
 dev                                           |  62 ++---
 jupyterhub/Chart.yaml                         |   2 +-
 ...nt-vm-setup.sh => vagrant-vm-root-setup.sh |   7 +-
 vagrant-vm-user-setup.sh                      |  20 ++
 8 files changed, 198 insertions(+), 126 deletions(-)
 rename vagrant-vm-setup.sh => vagrant-vm-root-setup.sh (85%)
 create mode 100644 vagrant-vm-user-setup.sh

diff --git a/.travis.yml b/.travis.yml
index 775374e2e6..262a155553 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -37,9 +37,9 @@ script:
   - setup_kind
   - setup_helm
   - ./ci/travis-docker-fix
-  - ./dev.py kind start
-  - ./dev.py upgrade
-  - ./dev.py test
+  - ./dev kind create
+  - ./dev upgrade
+  - ./dev test
 env:
   ## NOTE: The environment variables will be expanded to multiple jobs. For
   ## additional individual jobs, only the first entry is used.
@@ -71,7 +71,7 @@ jobs:
     script:
       - setup_helm
       - setup_kubeval
-      - ./dev.py check templates
+      - ./dev check templates
     env:
       - VALIDATE_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0,1.16.0
   - stage: publish
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a58eddb48b..ef515e403a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,44 +1,31 @@
 # Contributing

-Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter
-contributor
-guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html).
-
-## Local development for a code contribution
-
-### Prepare git
-
-1. Install [git](https://www.git-scm.com/). To verify it is installed, run this
-   from a terminal.
+We are very pleased to have you as a contributor, and we hope you will find your
+impact on the projects valuable. Thank you for sharing your interests, ideas,
+and skills with us!

-   ```bash
-   git version
-   ```
-
-1. Make a GitHub fork of [this
-   repository](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) by creating
-   and then logging into your GitHub account and clicking the Fork button.
+This is a [Jupyter](https://jupyter.org) project, so please start out by reading
+the first page of the general [Jupyter contributor
+guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html).

-1. Clone your fork to your local computer.
+## Local development

-   ```bash
-   git clone http://github.com//zero-to-jupyterhub-k8s.git
-   cd zero-to-jupyterhub-k8s
+### 1: Preparations

-   # make it easy to reference the projects GitHub repository as "upstream"
-   git remote add upstream https://github.com/jupyterhub/zero-to-jupyterhub-k8s
+Before anything else, install [git](https://www.git-scm.com/), clone the
+repository, and enter the repository directory.

-   # make it obvious what you reference by renaming a reference to your
-   # personal GitHub repository to "fork"
-   git remote rename origin fork
-   ```
+```
+git clone https://github.com/jupyterhub/zero-to-jupyterhub-k8s
+cd zero-to-jupyterhub-k8s
+```

-### Prepare Virtual Machine software
+For local development, you will need additional tools, and we present you with
+two options. Either you a) start and work from a Virtual Machine that is
+automatically prepared for development, or b) install the tools yourself and
+work without a Virtual Machine.

-A `Vagrantfile` is a way to prepare a Virtual Machine (VM), and we [have
-one](Vagrantfile) to prepare a VM for local development! We can use it to get
-a VM up and running, enter it with SSH, develop and run tests, and later shut
-down without influencing our system.
+#### a) Use a prepared Virtual Machine (VM)

 1. 
Install VirtualBox by [downloading and running an
   installer](https://www.virtualbox.org/wiki/Downloads).

@@ -46,64 +33,146 @@ down without influencing our system.

 1. Install Vagrant by [downloading and running an
    installer](https://www.vagrantup.com/downloads.html).

-### Develop and run tests
-
 1. Start a prepared VM and SSH into it.

-   ```bash
-   ## if you have suspended a VM earlier, use "vagrat resume" instead
+   ```shell
+   # if you have suspended a VM earlier, use "vagrant resume" instead
    vagrant up

-   ## enter a SSH session with the VM
+   # enter a SSH session with the VM
    vagrant ssh
-   ```
-2. Develop and test within the VM
-
-   ```bash
-   ## run within the SSH session
+
+   # relocate to the repository folder that is mounted from outside the VM
+   # IMPORTANT: changes to this folder will be seen outside your VM
    cd zero-to-jupyterhub-k8s
-
-   ## initialize some environment variables etc (notice the leading dot)
-   . ./dev init
+   ```
+
+1. Do your development.
+
+1. Exit and suspend the VM
+
+   ```shell
   ## exit the SSH session
   exit
+   vagrant suspend

-   ## start a k8s cluster
-   ./dev start-k8s
+   > **NOTE:** You can also use `vagrant destroy` to reset the VM state entirely.
+
+#### b) Install tools yourself
+
+This is what you need to install and make available on your PATH.
+
+- [docker](https://docs.docker.com/install/)
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- [helm](https://helm.sh/docs/using_helm/#installing-helm)
+- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
+- [kubeval](https://kubeval.instrumenta.dev/installation/)
+- Python 3.6+ ([Anaconda.com](https://www.anaconda.com/distribution/), [Python.org](https://www.python.org/downloads/))
+- Python dependencies installed
+  - `dev-requirements.txt`
+  - `doc/doc-requirements.txt`
+
+To verify you got it all right, you should be able to run the following commands
+without error.
+
+```
+git --version
+docker --version
+kubectl version --client
+helm version --client
+kind --version
+kubeval --version
+pytest --version
+chartpress --version
+```

-   ## install/upgrade the helm chart
+### 2: Setup a Kubernetes cluster
+
+You will now need a Kubernetes cluster to work with, and we present you with two
+options again. Either you, a) use an automated script that starts and sets up a
+Kubernetes cluster for you using [`kind`
+(Kubernetes-in-Docker)](https://kind.sigs.k8s.io), or b), you start and setup
+your own Kubernetes cluster.
+
+
+#### a) Automated use of `kind`
+
+```shell
+# create and setup a local Kubernetes cluster
+./dev kind create
+```
+
+> **NOTE:** You can also use the `--recreate` flag to recreate the cluster to
+> get a clean slate, or first run `./dev kind delete`.
+
+#### b) Self-managed Kubernetes cluster
+
+- To be compatible with all tests that are currently defined, your cluster needs
+  to have a network policy controller that enforces the network policies.
+- To use `./dev upgrade` or `./dev test`, you will note that you need to
+  explicitly declare the path of your Kubernetes config and what Kubernetes
+  context to use. This is enforced to ensure the script only works on a
+  Kubernetes cluster it is intended to work on.
+
+### 3: Install or upgrade your local Helm chart
+
+You have two options as usual to install or upgrade your local Helm chart,
+either you a) use the automated script to install or upgrade, or you do it on
+your own.
+
+TODO: learn properly about the chartpress --commit-range flag.
+
+#### a) Automated Helm chart install or upgrade
+
+1. 
Install or upgrade your local Helm chart

+   ```shell
    ./dev upgrade
+   ```
+
+1. Visit http://localhost:8080

-   ## see the results
-   # visit http://localhost:8090
+#### b) Manual Helm chart install or upgrade

-   ## make a change
-   # ...
+1. Use [`chartpress`](https://github.com/jupyterhub/chartpress) to rebuild
+   modified images if needed but also update the chart's
+   [values.yaml](jupyterhub/values.yaml) file with the appropriate image tags.

-   ## run tests
-   ./dev test
+   ```shell
+   chartpress --commit-range origin/master..HEAD
    ```
-
-3. Close the SSH session

-   ```bash
-   ## exit the SSH session
-   exit
-   vagrant suspend
-   # vagrant halt
-   # vagrant destroy
+   > **NOTE:** If you use a kind cluster and have built new images that will
+   > only be available locally, you must also load them into the kind cluster using
+   > the `kind load docker-image <image>` command.
+
+1. Use `helm` to install or upgrade your Helm chart.
+
+   ```shell
+   helm upgrade jh-dev ./jupyterhub --install --namespace jh-dev
    ```

-> **NOTE:** You can also use `vagrant destroy` to reset the VM state entirely,
-> but the start-k8s script will reset the k8s cluster if you have the same k8s
-> version set as previous so it should be fine to just `halt` and do `up` again
-> later.
+1. Use `kubectl` to open up a network path to your cluster.
+
+   ```shell
+   kubectl port-forward --namespace jh-dev service/proxy-public 8080:80
+   ```
+
+1. Visit http://localhost:8080
+
+### 4: Run tests
+
+```shell
+./dev test
+```

-### Debugging issues
+## Debugging issues

 Various things can go wrong while working with the local development
 environment, here are some typical issues and what to do about them.

-#### Network errors
+### Network errors

 Did you get an error like one of these below?

@@ -139,7 +208,7 @@ As you may notice, typical keywords associated with network errors are:
 Did you get an error like this?

 ```
-Unable to listen on port 8090: Listeners failed to create with the following errors: [Unable to create listener: Error listen tcp4 127.0.0.1:8090: bind: address already in use Unable to create listener: Error listen tcp6 [::1]:8090: bind: address already in use]
+Unable to listen on port 8080: Listeners failed to create with the following errors: [Unable to create listener: Error listen tcp4 127.0.0.1:8080: bind: address already in use Unable to create listener: Error listen tcp6 [::1]:8080: bind: address already in use]
 ```

 The key to solving this is understanding it!

@@ -155,7 +224,7 @@ Let's look on how we need traffic to be shuttled!

    When you run `vagrant up` your computer will read the
    [Vagrantfile](Vagrantfile) and from that conclude it should shuttle traffic
-   incoming to your computer on port `8090` to your VM on port `8080`.
+   incoming to your computer on port `8080` to your VM on port `8080`.

 2. *Traffic entering your VM should go to your Kubernetes cluster's Service
    named `proxy-public`.*

@@ -165,7 +234,7 @@ Let's look on how we need traffic to be shuttled!
    with the hub and proxy even though it is also possible to speak directly to
    the hub.

-In short, the traffic is routed from computer (8090), to the VM (8080), to the
+In short, the traffic is routed from computer (8080), to the VM (8080), to the
 Kubernetes `proxy-public` Service (80).
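
If you want to confirm that this whole chain is in place, probing the hub's REST
API from your computer is enough. This is only a sketch — it assumes the
port-forwarding above is active and that the chart was deployed with its
defaults:

```shell
# run this from your computer, outside the VM and the cluster
curl http://localhost:8080/hub/api

# a small JSON body such as {"version": "1.0.1dev"} means the request
# traveled the full route and was answered by the hub
```
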
The reason you may run into an issue is if there is another service already
listening on traffic arriving on a given port. Then you would need to either
shut it down or route traffic differently.
+
+
 ## Helm chart practices

 We strive to follow the guidelines provided by
diff --git a/Vagrantfile b/Vagrantfile
index 9f3e00faca..056c233b98 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -15,6 +15,6 @@ Vagrant.configure("2") do |config|
     lv.cpus = 2
   end if Vagrant.has_plugin?('vagrant-libvirt')

   config.vm.network "forwarded_port", guest: 8080, host: 8090

-  config.vm.provision "shell", path: "vagrant-vm-setup.sh"
+  config.vm.provision "shell", path: "vagrant-vm-root-setup.sh"
   config.vm.synced_folder ".", "/home/vagrant/zero-to-jupyterhub-k8s"
 end
diff --git a/ci/common b/ci/common
index 949455b987..2626251940 100755
--- a/ci/common
+++ b/ci/common
@@ -37,7 +37,7 @@ fi
 setup_kubectl () {
   echo "setup kubectl ${KUBE_VERSION}"
   if ! [ -f "bin/kubectl-${KUBE_VERSION}" ]; then
-    curl -Lo "bin/kubectl-${KUBE_VERSION}" "https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl"
+    curl -sSLo "bin/kubectl-${KUBE_VERSION}" "https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl"
     chmod +x "bin/kubectl-${KUBE_VERSION}"
   fi
   cp "bin/kubectl-${KUBE_VERSION}" bin/kubectl
@@ -46,7 +46,7 @@ setup_kind () {
   echo "setup kind ${KIND_VERSION}"
   if ! [ -f "bin/kind-${KIND_VERSION}" ]; then
-    curl -Lo "bin/kind-${KIND_VERSION}" "https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64"
+    curl -sSLo "bin/kind-${KIND_VERSION}" "https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64"
     chmod +x "bin/kind-${KIND_VERSION}"
   fi
   cp "bin/kind-${KIND_VERSION}" bin/kind
@@ -55,7 +55,7 @@ setup_helm () {
   echo "setup helm ${HELM_VERSION}"
   if ! [ -f "bin/helm-${HELM_VERSION}" ]; then
-    curl -Lo "bin/helm-${HELM_VERSION}.tar.gz" "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
+    curl -sSLo "bin/helm-${HELM_VERSION}.tar.gz" "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
     tar -xf "bin/helm-${HELM_VERSION}.tar.gz" --directory bin --strip-components 1 linux-amd64/helm
     rm "bin/helm-${HELM_VERSION}.tar.gz"
     mv bin/helm "bin/helm-${HELM_VERSION}"
@@ -66,7 +66,7 @@ setup_kubeval () {
   echo "setup kubeval ${KUBEVAL_VERSION}"
   if ! [ -f "bin/kubeval-${KUBEVAL_VERSION}" ]; then
-    curl -Lo "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" "https://github.com/instrumenta/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz"
+    curl -sSLo "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" "https://github.com/instrumenta/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz"
     tar -xf "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" --directory bin
     rm "bin/kubeval-${KUBEVAL_VERSION}.tar.gz"
     mv bin/kubeval "bin/kubeval-${KUBEVAL_VERSION}"
@@ -79,14 +79,14 @@ setup_git_crypt () {
   GIT_CRYPT_VERSION_SHA=46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b
   echo "setup git-crypt ${GIT_CRYPT_VERSION}"
   if ! 
[ -f "bin/git-crypt-${GIT_CRYPT_VERSION}" ]; then - curl -Lo "bin/git-crypt-${GIT_CRYPT_VERSION}" https://github.com/minrk/git-crypt-bin/releases/download/${GIT_CRYPT_VERSION}/git-crypt + curl -sSLo "bin/git-crypt-${GIT_CRYPT_VERSION}" https://github.com/minrk/git-crypt-bin/releases/download/${GIT_CRYPT_VERSION}/git-crypt chmod +x "bin/git-crypt-${GIT_CRYPT_VERSION}" echo "${GIT_CRYPT_VERSION_SHA} bin/git-crypt-${GIT_CRYPT_VERSION}" | shasum -a 256 -c - fi cp bin/git-crypt-${GIT_CRYPT_VERSION} bin/git-crypt } -if [ "$1" = "--setup" ]; then +if [ "${1:-}" = "--setup" ]; then setup_kubectl setup_kind setup_helm diff --git a/dev b/dev index 6730f9f56d..1b72f053d6 100755 --- a/dev +++ b/dev @@ -1,20 +1,5 @@ #!/usr/bin/env python3 """ -Checks that can be made: -1. Are we using the correct cluster? If not KUBECONFIG is set explicitly in an - env var or through an .env file, we could fail. -2. Are the required dependencies available on path or in the ./bin folder? - -Requirements: -- KUBECONFIG is set -- dev-config.yaml is used -- - -.env file: - GITHUB_ACCESS_TOKEN - for release changes and contributors - KUBECONFIG - for kind clusters - HELM_HOME - for plugins - CHARTPRESS_COMMIT_RANGE - ? """ import argparse @@ -55,9 +40,9 @@ def depend_on(binaries=[], envs=[]): print(f"- Env vars: {missing_envs}") print("") if missing_binaries: - print("Install and make the binaries available on your PATH!") + print("Install and make the binaries available on your PATH") if missing_envs: - print("Update your .env file!") + print("Update your .env file") sys.exit(1) else: return func(*args, **kwargs) @@ -68,8 +53,8 @@ def depend_on(binaries=[], envs=[]): @depend_on(binaries=["kind", "docker"], envs=["KUBE_VERSION"]) -def kind_start(recreate): - # check for a existing jh-dev cluster and conditionally delete it +def kind_create(recreate): + # check for a existing cluster kind_clusters = _run( cmd=["kind", "get", "clusters"], print_command=False, @@ -78,7 +63,7 @@ def kind_start(recreate): kind_cluster_exist = bool(re.search(r"\bjh-dev\b", kind_clusters)) if kind_cluster_exist: if recreate: - print("Deleting existing kind cluster named jh-dev.") + print("Deleting existing kind cluster") _run(["kind", "delete", "cluster", "--name", "jh-dev"]) else: # This workaround currently only works for single node clusters, @@ -91,16 +76,16 @@ def kind_start(recreate): capture_output=True, ) if not is_kind_cluster_container_running: - print("Starting up the existing kind cluster's node container.") + print("Starting up existing kind cluster") _run(["docker", "start", "jh-dev-control-plane"]) sys.exit(0) else: - print("The kind cluster was already started and running.") + print("The kind cluster was already created and running.") sys.exit(0) - # start a new cluster with a fixed name, kubernetes version - print('Creating kind cluster "jh-dev".') + # create and setup a new cluster + print('Creating kind cluster') _run([ "kind", "create", "cluster", "--name", "jh-dev", @@ -122,7 +107,7 @@ def kind_start(recreate): dotenv.set_key(".env", "KUBECONFIG", kubeconfig_path) os.environ["KUBECONFIG"] = kubeconfig_path - print('Making "jh-dev" the default namespace in the cluster.') + print('Setting default namespace') _run([ "kubectl", "config", "set-context", "--current", @@ -139,7 +124,6 @@ def kind_start(recreate): "kubectl", "apply", "-f", "https://docs.projectcalico.org/v3.10/manifests/calico.yaml", ], - print_end="", ) print("Waiting for Kubernetes nodes to become ready.") @@ -189,11 +173,11 @@ def kind_start(recreate): 
error_callback=_log_tiller_rollout_timeout, ) - print('Kind cluster "jh-dev" successfully setup!') + print('Kind cluster successfully setup!') @depend_on(binaries=["kind"], envs=[]) -def kind_stop(): +def kind_delete(): print('Deleting kind cluster "jh-dev".') _run(["kind", "delete", "cluster", "--name", "jh-dev"]) @@ -448,16 +432,16 @@ def _get_argparser(): ) kind_cmds = kind.add_subparsers(title="Commands", dest="sub_cmd") - kind_start = kind_cmds.add_parser( - "start", help="Start and initialize a kind Kubernetes cluster." + kind_create = kind_cmds.add_parser( + "create", help="Create and setup a kind Kubernetes cluster." ) - kind_start.add_argument( + kind_create.add_argument( "--recreate", action="store_true", - help="If the cluster is already started, delete it and start a new.", + help="If the cluster already exist, delete it and create a new.", ) - kind_stop = kind_cmds.add_parser( - "stop", help="Stop and delete a previously started kind Kubernetes cluster." + kind_delete = kind_cmds.add_parser( + "delete", help="Stop and delete a previously started kind Kubernetes cluster." ) upgrade = _cmds.add_parser( @@ -525,7 +509,7 @@ if __name__ == "__main__": ## potential modifications of non developer clusters. It should ## be to the path where the kubernetes config resides. ## - ## The "./dev.py kind start" command will set this files KUBECONFIG + ## The "./dev.py kind create" command will set this files KUBECONFIG ## entry automatically on cluster creation. ## KUBECONFIG= @@ -560,10 +544,10 @@ if __name__ == "__main__": # run suitable command and pass arguments if args.cmd == "kind": - if args.sub_cmd == "start": - kind_start(recreate=args.recreate) - if args.sub_cmd == "stop": - kind_stop() + if args.sub_cmd == "create": + kind_create(recreate=args.recreate) + if args.sub_cmd == "delete": + kind_delete() if args.cmd == "upgrade": upgrade(args.values) diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index 77221f13ab..a0d0598171 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -1,5 +1,5 @@ name: jupyterhub -version: '0.9-dev' +version: '0.9-2a7e0af' appVersion: 1.0.1dev description: Multi-user Jupyter installation home: https://z2jh.jupyter.org diff --git a/vagrant-vm-setup.sh b/vagrant-vm-root-setup.sh similarity index 85% rename from vagrant-vm-setup.sh rename to vagrant-vm-root-setup.sh index 8de197d41d..d79d0233fb 100644 --- a/vagrant-vm-setup.sh +++ b/vagrant-vm-root-setup.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -eu # Install pip @@ -7,7 +7,6 @@ set -eu # apt-get -q update apt-get -q install -y python3-pip -echo 'PATH=$PATH:~/.local/bin' >> /home/vagrant/.bashrc # Install Docker CE @@ -42,5 +41,5 @@ EOF sed -i -re "s/^(127.0.0.1\\s.+)/\\1 `hostname`/" /etc/hosts -# Put to be downloaded binaries on PATH -echo 'PATH=$PATH:~/zero-to-jupyterhub-k8s/bin' >> /home/vagrant/.bashrc +# Make additional setup steps as the vagrant user +su -c "source /home/vagrant/zero-to-jupyterhub-k8s/vagrant-vm-user-setup.sh" vagrant diff --git a/vagrant-vm-user-setup.sh b/vagrant-vm-user-setup.sh new file mode 100644 index 0000000000..ea3864266f --- /dev/null +++ b/vagrant-vm-user-setup.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Enter the mounted repo folder +cd ~/zero-to-jupyterhub-k8s + + +# Install Python dependencies and put them on PATH +pip3 install -r dev-requirements.txt +pip3 install -r doc/doc-requirements.txt +echo 'PATH=$PATH:~/.local/bin' >> ~/.bashrc + + +# Install binaries and put them on PATH +. 
ci/common --setup
+echo 'PATH=$PATH:~/zero-to-jupyterhub-k8s/bin' >> ~/.bashrc
+
+
+# Setup autocompletion
+echo 'source <(kubectl completion bash)' >>~/.bashrc
+echo 'source <(helm completion bash)' >>~/.bashrc

From 6e0f758e41c8df3b4552141e4dc0ffb6823f53b3 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Sun, 13 Oct 2019 22:57:55 +0200
Subject: [PATCH 54/77] Rewrite of CONTRIBUTING.md etc. again

---
 CONTRIBUTING.md      | 84 +++++++++++++++++++++++++++++++++++---------
 ci/publish           |  4 +--
 dev                  | 12 ++-----
 dev-requirements.txt |  2 +-
 4 files changed, 73 insertions(+), 29 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ef515e403a..09d02af9e2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -8,6 +8,8 @@ This is a [Jupyter](https://jupyter.org) project, so please start out by reading
 the first page of the general [Jupyter contributor
 guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html).

+
+
 ## Local development

 ### 1: Preparations
@@ -33,32 +35,44 @@ work without a Virtual Machine.
 1. Install Vagrant by [downloading and running an
    installer](https://www.vagrantup.com/downloads.html).

-1. Start a prepared VM and SSH into it.
+1. Start and automatically set up a VM.
+
+   Use the `vagrant up` command to start the VM for the first time, or `vagrant
+   resume` to resume a suspended VM.

    ```shell
-   # if you have suspended a VM earlier, use "vagrant resume" instead
    vagrant up
+   # vagrant resume
+   ```

-   # enter a SSH session with the VM
-   vagrant ssh
+1. Enter the VM and the `~/zero-to-jupyterhub-k8s` folder.
+
+   The `~/zero-to-jupyterhub-k8s` folder in the VM will be the exact same folder
+   on your actual machine. Changes made in one place are reflected in the other.

-   # relocate to the repository folder that is mounted from outside the VM
-   # IMPORTANT: changes to this folder will be seen outside your VM
+   ```shell
+   vagrant ssh
    cd zero-to-jupyterhub-k8s
    ```

 1. Do your development.

-1. Exit and suspend the VM
+1. Exit and suspend the VM.
+
+   If you don't mind using some disk space, suspending the VM with
+   `vagrant suspend` is a better option than the `halt` or `destroy`
+   commands. Suspending the VM will allow you to quickly get back to development
+   within the VM later. For more details, see this [description of the
+   differences](https://www.vagrantup.com/intro/getting-started/teardown.html).

    ```shell
-   ## exit the SSH session
+   # exit the VM
    exit
+
+   # suspend the VM
    vagrant suspend
    ```

-   > **NOTE:** You can also use `vagrant destroy` to reset the VM state entirely.
-
 #### b) Install tools yourself

 This is what you need to install and make available on your PATH.
@@ -87,6 +101,8 @@ pytest --version
 chartpress --version
 ```

+
+
 ### 2: Setup a Kubernetes cluster

 You will now need a Kubernetes cluster to work with, and we present you with two
@@ -95,7 +111,6 @@ Kubernetes cluster for you using [`kind`
 (Kubernetes-in-Docker)](https://kind.sigs.k8s.io), or b), you start and setup
 your own Kubernetes cluster.

-
 #### a) Automated use of `kind`

@@ -115,13 +130,24 @@ your own Kubernetes cluster.
   context to use. This is enforced to ensure the script only works on a
   Kubernetes cluster it is intended to work on.

+
+
 ### 3: Install or upgrade your local Helm chart

-You have two options as usual to install or upgrade your local Helm chart,
-either you a) use the automated script to install or upgrade, or you do it on
-your own.
+This repository contains various `Dockerfile`s that are used to build docker
+images in use by the Helm chart. 
To help us build these docker images only when
+needed as well as update the Helm chart's [`values.yaml`](jupyterhub/values.yaml)
+to use the latest available image, we rely on a command line tool called
+[`chartpress`](https://github.com/jupyterhub/chartpress) that is installed as
+part of [dev-requirements.txt](dev-requirements.txt).
+
+Chartpress is configured through [chartpress.yaml](chartpress.yaml), and will
+only rebuild images if their dependent files in their respective directories or
+chartpress.yaml itself has changed.

-TODO: learn properly about the chartpress --commit-range flag.
+Now you will be presented with two options as usual, either you a) use the
+automated script to install or upgrade the Helm chart, or b) you do it on your
+own.

 #### a) Automated Helm chart install or upgrade

 1. Install or upgrade your local Helm chart

    ```shell
    ./dev upgrade
    ```

 #### b) Manual Helm chart install or upgrade

 1. Use [`chartpress`](https://github.com/jupyterhub/chartpress) to rebuild
    modified images if needed but also update the chart's
    [values.yaml](jupyterhub/values.yaml) file with the appropriate image tags.

    ```shell
-   chartpress --commit-range origin/master..HEAD
+   chartpress
    ```

    > **NOTE:** If you use a kind cluster and have built new images that will
    > only be available locally, you must also load them into the kind cluster using
    > the `kind load docker-image <image>` command.

 1. Use `helm` to install or upgrade your Helm chart.

    ```shell
    helm upgrade jh-dev ./jupyterhub --install --namespace jh-dev
    ```

 1. Use `kubectl` to open up a network path to your cluster.

    ```shell
    kubectl port-forward --namespace jh-dev service/proxy-public 8080:80
    ```

 1. Visit http://localhost:8080

+
+
 ### 4: Run tests

+To run the available tests, you can a) use the dev script or b) do it yourself
+with `pytest`. Using the dev script you will be presented with useful debugging
+information if a test fails, and you will be required to explicitly declare what
+Kubernetes cluster to use in the `.env` file. This can help to avoid mistakenly
+working towards the wrong Kubernetes cluster.
+
+> **NOTE:** If you haven't port-forwarded the `proxy-public` Kubernetes service
+> to port `8080` on localhost (`127.0.0.1`), you will need to set the
+> environment variables `PROXY_PUBLIC_SERVICE_HOST` and
+> `PROXY_PUBLIC_SERVICE_PORT` respectively. If you use the dev script, you can
+> set them in the `.env` file.
+
+#### a) Run tests with the dev script
+
 ```shell
 ./dev test
 ```

+#### b) Run tests manually
+
+```shell
+pytest -v --exitfirst ./tests
+```
+
+
+
 ## Debugging issues

 Various things can go wrong while working with the local development
 environment, here are some typical issues and what to do about them.

+
+
 ## Helm chart practices

 We strive to follow the guidelines provided by
diff --git a/ci/publish b/ci/publish
index 1e2c9165d1..bda2addc29 100755
--- a/ci/publish
+++ b/ci/publish
@@ -17,9 +17,9 @@ set -x
 export GIT_SSH_COMMAND="ssh -i ${PWD}/ci/id_rsa"

 if [ "${TRAVIS_TAG:-}" == "" ]; then
-    chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart
+    chartpress --push --publish-chart
 else
-    chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart --tag "${TRAVIS_TAG}"
+    chartpress --push --publish-chart --tag "${TRAVIS_TAG}"
 fi

 # Let us log the changes chartpress did, it should include replacements for
diff --git a/dev b/dev
index 1b72f053d6..124055e9b9 100755
--- a/dev
+++ b/dev
@@ -189,10 +189,7 @@ def upgrade(values):
         "TRAVIS_COMMIT_RANGE",
         os.environ["CHARTPRESS_COMMIT_RANGE"]
     )
-    _run([
-        "chartpress",
-        "--commit-range", commit_range,
-    ])
+    _run(["chartpress"])

     # git --no-pager diff
     if "kind-config-jh-dev" in os.environ["KUBECONFIG"]:
@@ -500,16 +497,11 @@ if __name__ == "__main__":
                 ## GITHUB_ACCESS_TOKEN=
                 #
-                ## CHARTPRESS_COMMIT_RANGE can help us avoids image rebuilds. 
If - ## the main repo remote isn't named origin, correct it here. - ## - CHARTPRESS_COMMIT_RANGE=origin/master..HEAD - # ## KUBECONFIG is required to be set explicitly in order to avoid ## potential modifications of non developer clusters. It should ## be to the path where the kubernetes config resides. ## - ## The "./dev.py kind create" command will set this files KUBECONFIG + ## The "./dev kind create" command will set this files KUBECONFIG ## entry automatically on cluster creation. ## KUBECONFIG= diff --git a/dev-requirements.txt b/dev-requirements.txt index d81d6fc874..e5ad3af1a4 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -21,6 +21,6 @@ requests ## yamllint>=1.17.0 -## ./dev.py use these +## ./dev use these colorama python-dotenv From 68cda0717e4453669b3b07d30bd2626bfca07a2e Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Sun, 13 Oct 2019 23:16:50 +0200 Subject: [PATCH 55/77] Remove all remnants of chartpress --commit-range flag See https://github.com/jupyterhub/chartpress/issues/48 --- dev | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/dev b/dev index 124055e9b9..556660c5e7 100755 --- a/dev +++ b/dev @@ -182,13 +182,9 @@ def kind_delete(): _run(["kind", "delete", "cluster", "--name", "jh-dev"]) -@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "CHARTPRESS_COMMIT_RANGE", "PROXY_PUBLIC_SERVICE_PORT"]) +@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "PROXY_PUBLIC_SERVICE_PORT"]) def upgrade(values): print("Building images and updating image tags if needed.") - commit_range = os.environ.get( - "TRAVIS_COMMIT_RANGE", - os.environ["CHARTPRESS_COMMIT_RANGE"] - ) _run(["chartpress"]) # git --no-pager diff From 327759bcb17c04692549ed26a827add029808a6c Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 00:04:14 +0200 Subject: [PATCH 56/77] Add back required daemonset/calico-node fix --- dev | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/dev b/dev index 556660c5e7..a2a5ddc1f2 100755 --- a/dev +++ b/dev @@ -124,6 +124,23 @@ def kind_create(recreate): "kubectl", "apply", "-f", "https://docs.projectcalico.org/v3.10/manifests/calico.yaml", ], + print_end="", + ) + + # NOTE: daemonset/calico-node pods' main container fails to start up without + # an additional environment variable configured to disable a check + # that we fail. 
+ # + # env: + # - name: FELIX_IGNORELOOSERPF + # value: "true" + _run( + cmd=[ + "kubectl", "patch", "daemonset/calico-node", + "--namespace", "kube-system", + "--type", "json", + "--patch", '[{"op":"add", "path":"/spec/template/spec/containers/0/env/-", "value":{"name":"FELIX_IGNORELOOSERPF", "value":"true"}}]', + ], ) print("Waiting for Kubernetes nodes to become ready.") @@ -283,12 +300,12 @@ def _log_test_failure(): print_end="", ) _run( - cmd=["kubectl", "get", "logs", "deploy/hub",], + cmd=["kubectl", "logs", "deploy/hub",], exit_on_error=False, print_end="", ) _run( - cmd=["kubectl", "get", "logs", "deploy/proxy",], + cmd=["kubectl", "logs", "deploy/proxy",], exit_on_error=False, ) From a0ee570fa8264d132670945fc7c521bbb5667f41 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 00:13:39 +0200 Subject: [PATCH 57/77] Add back K8s 1.16 with fixed calico --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 262a155553..8d695baa0c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -58,7 +58,7 @@ env: ## ## ref: https://github.com/projectcalico/calico/issues/2915 ## - # - KUBE_VERSION=1.16.1 + - KUBE_VERSION=1.16.1 - KUBE_VERSION=1.15.3 - KUBE_VERSION=1.14.6 - KUBE_VERSION=1.13.10 From e02387a20812c296b0525afa56f56bdd4b0ed75e Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 00:26:54 +0200 Subject: [PATCH 58/77] Helm 2.15 is required for k8s 1.16 --- ci/common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/common b/ci/common index 2626251940..84d96bbe65 100755 --- a/ci/common +++ b/ci/common @@ -24,7 +24,7 @@ fi if [ -z ${HELM_VERSION:-} ]; then ## ref: https://github.com/helm/helm/releases ## - export HELM_VERSION=2.14.3 + export HELM_VERSION=2.15.0-rc.1 fi if [ -z ${KUBEVAL_VERSION:-} ]; then ## ref: https://github.com/instrumenta/kubeval/releases From f86e7c730e763706eece62d90d629650d732e244 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 00:32:41 +0200 Subject: [PATCH 59/77] Support helm 2.15 requirements --- jupyterhub/Chart.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index a0d0598171..3b8087828c 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -1,5 +1,6 @@ +apiVersion: v1 name: jupyterhub -version: '0.9-2a7e0af' +version: '0.9.0-dev' appVersion: 1.0.1dev description: Multi-user Jupyter installation home: https://z2jh.jupyter.org From 41beb0114bf606f9750e39e9139f40fa5f65a948 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 01:00:24 +0200 Subject: [PATCH 60/77] Disable k8s 1.16 again and use old helm again --- .travis.yml | 2 +- ci/common | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8d695baa0c..e78c4688b9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -58,7 +58,7 @@ env: ## ## ref: https://github.com/projectcalico/calico/issues/2915 ## - - KUBE_VERSION=1.16.1 + # - KUBE_VERSION=1.16.1 (Required bumped Helm version, but that version currently errors) - KUBE_VERSION=1.15.3 - KUBE_VERSION=1.14.6 - KUBE_VERSION=1.13.10 diff --git a/ci/common b/ci/common index 84d96bbe65..147d8abff5 100755 --- a/ci/common +++ b/ci/common @@ -24,7 +24,10 @@ fi if [ -z ${HELM_VERSION:-} ]; then ## ref: https://github.com/helm/helm/releases ## - export HELM_VERSION=2.15.0-rc.1 + ## FIXME: Helm version 2.15.0-rc.1 errored with "Transport is closing", + ## but Kubernetes 1.16 requires a fix for tiller that is using the 
## apiVersion: extensions/v1beta1 still.
+  export HELM_VERSION=2.14.3
 fi
 if [ -z ${KUBEVAL_VERSION:-} ]; then
   ## ref: https://github.com/instrumenta/kubeval/releases

From 75761a31ce51cc195d58ea9b11dfa507f28381f6 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Mon, 14 Oct 2019 09:45:41 +0200
Subject: [PATCH 61/77] Improve docstrings in test_spawn.py

---
 tests/conftest.py   |   2 +-
 tests/test_spawn.py | 105 ++++++++++++++++++++++++++++----------------
 2 files changed, 68 insertions(+), 39 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index e4db1edf6a..35c483a94e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -15,7 +15,7 @@ def request_data():
     with open(os.path.join(basedir, "dev-config.yaml")) as f:
         y = yaml.safe_load(f)
     token = y["hub"]["services"]["test"]["apiToken"]
-    host = os.environ.get("ROXY_PUBLIC_SERVICE_HOST", "127.0.0.1")
+    host = os.environ.get("PROXY_PUBLIC_SERVICE_HOST", "127.0.0.1")
     port = os.environ.get("PROXY_PUBLIC_SERVICE_PORT", "8080")
     return {
         "token": token,
diff --git a/tests/test_spawn.py b/tests/test_spawn.py
index 05095c7eb8..318d4728f6 100644
--- a/tests/test_spawn.py
+++ b/tests/test_spawn.py
@@ -19,69 +19,94 @@


 def test_api(api_request):
+    """
+    Tests the hub api's root endpoint (/). The hub's version should be returned.
+
+    A typical jupyterhub logging response to this test:
+
+    [I 2019-09-25 12:03:12.051 JupyterHub log:174] 200 GET /hub/api (test@127.0.0.1) 9.57ms
+    """
+
     print("asking for the hub's version")
     r = api_request.get("")
     assert r.status_code == 200
     assert r.json().get("version", "version-missing") == jupyterhub_version
-    """kubectl logs deploy/hub - on a successful run
-    [I 2019-09-25 12:03:12.051 JupyterHub log:174] 200 GET /hub/api (test@127.0.0.1) 9.57ms
-    """


 def test_api_info(api_request):
+    """
+    Tests the hub api's /info endpoint. Information about the hub should be
+    returned.
+
+    A typical jupyterhub logging response to this test:
+
+    [I 2019-09-25 12:03:12.086 JupyterHub log:174] 200 GET /hub/api/info (test@127.0.0.1) 10.21ms
+    """
+
     print("asking for the hub information")
     r = api_request.get("/info")
     assert r.status_code == 200
     result = r.json()
     assert result["spawner"]["class"] == "kubespawner.spawner.KubeSpawner"
-    """kubectl logs deploy/hub - on a successful run
-    [I 2019-09-25 12:03:12.086 JupyterHub log:174] 200 GET /hub/api/info (test@127.0.0.1) 10.21ms
-    """
-

 def test_hub_api_create_user_and_get_information_about_user(api_request, jupyter_user):
-    # NOTE: The jupyter user is created and commited to the hub database through
-    #       the jupyter_user pytest fixture declared in conftest.py. Due to
-    #       this, this first test is actually testing both the fixture to create
-    #       the user, and the ability to get information from the hub about the
-    #       user.
-    #
-    #       Also note that the fixture will automatically clean up the
-    #       user from the hub's database when the function exits.
+    """
+    Tests the hub api's /users/:user endpoint, both POST and GET.
+
+    A jupyter user is created and committed to the hub database through the
+    jupyter_user pytest fixture declared in conftest.py. Due to this, this first
+    test to use the jupyter_user fixture is actually testing both the pytest
+    fixture that creates the user and the ability to get information from
+    the hub about the user. The jupyter_user fixture will automatically clean up
+    the user from the hub's database when this function exits. 
+ + A typical jupyterhub logging response to this test: + + [I 2019-09-25 12:03:12.126 JupyterHub log:174] 201 POST /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 20.74ms + [I 2019-09-25 12:03:12.153 JupyterHub log:174] 200 GET /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 11.91ms + [D 2019-09-25 12:03:12.180 JupyterHub user:240] Creating for testuser-7c70eb90-035b-4d9f-92a5-482e441e307d: + [I 2019-09-25 12:03:12.204 JupyterHub reflector:199] watching for pods with label selector='component=singleuser-server' in namespace jh-dev + [D 2019-09-25 12:03:12.205 JupyterHub reflector:202] Connecting pods watcher + [I 2019-09-25 12:03:12.229 JupyterHub reflector:199] watching for events with field selector='involvedObject.kind=Pod' in namespace jh-dev + [D 2019-09-25 12:03:12.229 JupyterHub reflector:202] Connecting events watcher + [I 2019-09-25 12:03:12.269 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 98.85ms + """ print("create a user, and get information about the user") r = api_request.get("/users/" + jupyter_user) assert r.status_code == 200 assert r.json()["name"] == jupyter_user - """kubectl logs deploy/hub - on a successful run - [I 2019-09-25 12:03:12.126 JupyterHub log:174] 201 POST /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 20.74ms - [I 2019-09-25 12:03:12.153 JupyterHub log:174] 200 GET /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 11.91ms - [D 2019-09-25 12:03:12.180 JupyterHub user:240] Creating for testuser-7c70eb90-035b-4d9f-92a5-482e441e307d: - [I 2019-09-25 12:03:12.204 JupyterHub reflector:199] watching for pods with label selector='component=singleuser-server' in namespace jh-dev - [D 2019-09-25 12:03:12.205 JupyterHub reflector:202] Connecting pods watcher - [I 2019-09-25 12:03:12.229 JupyterHub reflector:199] watching for events with field selector='involvedObject.kind=Pod' in namespace jh-dev - [D 2019-09-25 12:03:12.229 JupyterHub reflector:202] Connecting events watcher - [I 2019-09-25 12:03:12.269 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 98.85ms + +def test_hub_api_list_users(api_request, jupyter_user): """ + Tests the hub api's /users endpoint. Information about users should be + returned. 
+
+    A typical jupyterhub logging response to this test:
+
+    [I 2019-09-25 12:03:12.303 JupyterHub log:174] 201 POST /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 15.53ms
+    [I 2019-09-25 12:03:12.331 JupyterHub log:174] 200 GET /hub/api/users (test@127.0.0.1) 10.83ms
+    [D 2019-09-25 12:03:12.358 JupyterHub user:240] Creating for testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f:
+    [I 2019-09-25 12:03:12.365 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 18.44ms
+    """
+
     print("create a test user, get information about all users, and find the test user")
     r = api_request.get("/users")
     assert r.status_code == 200
     assert any(u["name"] == jupyter_user for u in r.json())

-    """kubectl logs deploy/hub - on a successful run
-    [I 2019-09-25 12:03:12.303 JupyterHub log:174] 201 POST /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 15.53ms
-    [I 2019-09-25 12:03:12.331 JupyterHub log:174] 200 GET /hub/api/users (test@127.0.0.1) 10.83ms
-    [D 2019-09-25 12:03:12.358 JupyterHub user:240] Creating for testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f:
-    [I 2019-09-25 12:03:12.365 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 18.44ms
+
+def test_hub_can_talk_to_proxy(api_request, request_data):
     """
+    Tests the hub api's /proxy endpoint.

+    A typical jupyterhub logging response to this test:
+
+    [I 2019-09-25 12:03:12.395 JupyterHub log:174] 200 GET /hub/api/proxy (test@127.0.0.1) 13.48ms
+    """

-def test_hub_can_talk_to_proxy(api_request, request_data):
     endtime = time.time() + request_data["test_timeout"]
     while time.time() < endtime:
         try:
@@ -94,12 +119,13 @@ def test_hub_can_talk_to_proxy(api_request, request_data):
             time.sleep(1)
     assert r.status_code == 200, "Failed to get /proxy"

-    """kubectl logs deploy/hub - on a successful run
-    [I 2019-09-25 12:03:12.395 JupyterHub log:174] 200 GET /hub/api/proxy (test@127.0.0.1) 13.48ms
-    """
-

 def test_hub_api_request_user_spawn(api_request, jupyter_user, request_data):
+    """
+    Tests the hub api's /users/:user/server POST endpoint. A user pod should be
+    created.
+    """
+
     print("asking kubespawner to spawn a server for a test user")
     r = api_request.post("/users/" + jupyter_user + "/server")
     assert r.status_code in (201, 202)
@@ -120,9 +146,12 @@ def test_hub_api_request_user_spawn(api_request, jupyter_user, request_data):


 def test_singleuser_netpol(api_request, jupyter_user, request_data):
-    print(
-        "asking kubespawner to spawn a server for a test user to test network policies"
-    )
+    """
+    Tests a spawned user pod's ability to communicate with allowed and blocked
+    internet locations. 
+ """ + + print("asking kubespawner to spawn a server for a test user to test network policies") r = api_request.post("/users/" + jupyter_user + "/server") assert r.status_code in (201, 202) try: From 95f9ce0b1bd641f0cc98f76eeed244ea855987ff Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 10:14:22 +0200 Subject: [PATCH 62/77] --context and --namespace enforced --- dev | 120 +++++++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 87 insertions(+), 33 deletions(-) diff --git a/dev b/dev index a2a5ddc1f2..f524e5c769 100755 --- a/dev +++ b/dev @@ -25,6 +25,8 @@ def depend_on(binaries=[], envs=[]): def decorator_depend_on(func): @functools.wraps(func) def wrapper_depend_on(*args, **kwargs): + # FEATURE: Allow prompt to set variables interactively. + missing_binaries = [] for binary in binaries: if shutil.which(binary) is None: @@ -52,7 +54,7 @@ def depend_on(binaries=[], envs=[]): return decorator_depend_on -@depend_on(binaries=["kind", "docker"], envs=["KUBE_VERSION"]) +@depend_on(binaries=["kubectl", "kind", "docker"], envs=["KUBE_VERSION"]) def kind_create(recreate): # check for a existing cluster kind_clusters = _run( @@ -101,17 +103,32 @@ def kind_create(recreate): print_command=False, capture_output=True, ) - - if os.environ["KUBECONFIG"] != kubeconfig_path: + if os.environ.get("KUBECONFIG", None) != kubeconfig_path: print(f'Updating your .env file\'s KUBECONFIG value to "{kubeconfig_path}"') dotenv.set_key(".env", "KUBECONFIG", kubeconfig_path) os.environ["KUBECONFIG"] = kubeconfig_path + kube_context = _run( + cmd=["kubectl", "config", "current-context"], + print_command=False, + capture_output=True, + ) + if os.environ.get("Z2JH_KUBE_CONTEXT", None) != kube_context: + print(f'Updating your .env file\'s Z2JH_KUBE_CONTEXT value to "{kube_context}"') + dotenv.set_key(".env", "Z2JH_KUBE_CONTEXT", kube_context) + os.environ["Z2JH_KUBE_CONTEXT"] = kube_context + + kube_namespace = os.environ.get("Z2JH_KUBE_NAMESPACE", "jh-dev") + if os.environ.get("Z2JH_KUBE_NAMESPACE", None) != kube_namespace: + print(f'Updating your .env file\'s Z2JH_KUBE_NAMESPACE value to "{kube_namespace}"') + dotenv.set_key(".env", "Z2JH_KUBE_NAMESPACE", kube_namespace) + os.environ["Z2JH_KUBE_NAMESPACE"] = kube_namespace + print('Setting default namespace') _run([ "kubectl", "config", "set-context", "--current", - "--namespace", "jh-dev", + "--namespace", kube_namespace, ]) @@ -199,7 +216,7 @@ def kind_delete(): _run(["kind", "delete", "cluster", "--name", "jh-dev"]) -@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "PROXY_PUBLIC_SERVICE_PORT"]) +@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE", "PROXY_PUBLIC_SERVICE_PORT"]) def upgrade(values): print("Building images and updating image tags if needed.") _run(["chartpress"]) @@ -217,7 +234,8 @@ def upgrade(values): cmd = [ "helm", "upgrade", "jh-dev", "./jupyterhub", "--install", - "--namespace", "jh-dev", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--kube-context", os.environ["Z2JH_KUBE_CONTEXT"], ] for value in values: cmd.append("--values") @@ -229,25 +247,34 @@ def upgrade(values): cmd=[ "kubectl", "rollout", "status", "deployment/proxy", "--timeout", "2m", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], ], print_end="" ) _run([ "kubectl", "rollout", "status", "deployment/hub", "--timeout", "2m", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", 
os.environ["Z2JH_KUBE_CONTEXT"], ]) + +@depend_on(binaries=["kubectl"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE", "PROXY_PUBLIC_SERVICE_PORT"]) +def port_forward(): print("Run and forget about port-forwarding.") _run( cmd=[ "kubectl", "port-forward", "service/proxy-public", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], f"{os.environ['PROXY_PUBLIC_SERVICE_PORT']}:80", ], forget=True, ) -@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "PROXY_PUBLIC_SERVICE_HOST", "PROXY_PUBLIC_SERVICE_PORT"]) +@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE", "PROXY_PUBLIC_SERVICE_HOST", "PROXY_PUBLIC_SERVICE_PORT"]) def test(): _run( cmd=["pytest", "-v", "--exitfirst", "./tests"], @@ -255,7 +282,13 @@ def test(): ) print("Tests succeeded!") - _run(cmd=["kubectl", "get", "pods"]) + _run( + cmd=[ + "kubectl", "get", "pods", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ] + ) @depend_on(binaries=["helm", "yamllint", "kubeval"], envs=[]) @@ -285,27 +318,47 @@ def changelog(): def _log_test_failure(): print("A test failed, let's debug!") _run( - cmd=["kubectl", "describe", "nodes",], + cmd=[ + "kubectl", "describe", "nodes", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ], exit_on_error=False, print_end="", ) _run( - cmd=["kubectl", "get", "pods",], + cmd=[ + "kubectl", "get", "pods", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ], exit_on_error=False, print_end="", ) _run( - cmd=["kubectl", "get", "events",], + cmd=[ + "kubectl", "get", "events", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ], exit_on_error=False, print_end="", ) _run( - cmd=["kubectl", "logs", "deploy/hub",], + cmd=[ + "kubectl", "logs", "deploy/hub", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ], exit_on_error=False, print_end="", ) _run( - cmd=["kubectl", "logs", "deploy/proxy",], + cmd=[ + "kubectl", "logs", "deploy/proxy", + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ], exit_on_error=False, ) @@ -313,7 +366,10 @@ def _log_test_failure(): def _log_tiller_rollout_timeout(): print("Helm's tiller never became ready!") _run( - cmd=["kubectl", "describe", "nodes",], + cmd=[ + "kubectl", "describe", "nodes", + "--context", os.environ["Z2JH_KUBE_CONTEXT"], + ], exit_on_error=False, print_end="", ) @@ -321,6 +377,7 @@ def _log_tiller_rollout_timeout(): cmd=[ "kubectl", "describe", "deployment/tiller", "--namespace", "kube-system", + "--context", os.environ["Z2JH_KUBE_CONTEXT"], ], exit_on_error=False, print_end="", @@ -329,6 +386,7 @@ def _log_tiller_rollout_timeout(): cmd=[ "kubectl", "logs", "deployment/tiller", "--namespace", "kube-system", + "--context", os.environ["Z2JH_KUBE_CONTEXT"], ], exit_on_error=False, ) @@ -336,39 +394,28 @@ def _log_tiller_rollout_timeout(): def _log_wait_node_timeout(): print("Kubernetes nodes never became ready") - _run( - cmd=["kubectl", "describe", "nodes",], - exit_on_error=False, - print_end="", - ) _run( cmd=[ - "kubectl", "describe", "calico-etcd", - "--namespace", "kube-system", + "kubectl", "describe", "nodes", + "--context", os.environ["Z2JH_KUBE_CONTEXT"], ], exit_on_error=False, print_end="", ) _run( cmd=[ - "kubectl", 
"logs", "calico-etcd", + "kubectl", "describe", "daemonset/calico-node", "--namespace", "kube-system", + "--context", os.environ["Z2JH_KUBE_CONTEXT"], ], exit_on_error=False, print_end="", ) _run( cmd=[ - "kubectl", "describe", "calico-node", - "--namespace", "kube-system", - ], - exit_on_error=False, - print_end="", - ) - _run( - cmd=[ - "kubectl", "logs", "calico-node", + "kubectl", "logs", "daemonset/calico-node", "--namespace", "kube-system", + "--context", os.environ["Z2JH_KUBE_CONTEXT"], ], exit_on_error=False, ) @@ -465,6 +512,10 @@ def _get_argparser(): help="A Helm values file, this argument can be passed multiple times.", ) + port_forward = _cmds.add_parser( + "port-forward", help="Start port-forwarding of the deployed Helm chart's proxy-public service in a detached process." + ) + test = _cmds.add_parser( "test", help="Run tests on the deployed Helm chart in the Kubernetes cluster." ) @@ -525,13 +576,13 @@ if __name__ == "__main__": ## ## ref: https://hub.docker.com/r/kindest/node/tags ## - # KUBE_VERSION=1.15.3 + KUBE_VERSION=1.15.3 # ## VALIDATE_KUBE_VERSIONS is used when you check your Helm ## templates. Are the generated Kubernetes resources valid ## resources for these Kubernetes versions? ## - # VALIDATE_KUBE_VERSIONS=1.14.0,1.15.0 + VALIDATE_KUBE_VERSIONS=1.15.0 # ## PROXY_PUBLIC_SERVICE_HOST and PROXY_PUBLIC_SERVICE_PORT allow ## you to run the tests if you have used kubectl to port forward @@ -557,6 +608,9 @@ if __name__ == "__main__": if args.cmd == "upgrade": upgrade(args.values) + if args.cmd == "port-forward": + port_forward() + if args.cmd == "test": test() From d0d090497f3a2209d89b84079ad89e96e3498355 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Mon, 14 Oct 2019 11:18:44 +0200 Subject: [PATCH 63/77] Fix --context --namespace bugs and port-forwarding --- .travis.yml | 1 + dev | 96 +++++++++++++++++++++++------------------------ tests/conftest.py | 4 +- 3 files changed, 51 insertions(+), 50 deletions(-) diff --git a/.travis.yml b/.travis.yml index e78c4688b9..1cd4fb94dd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,6 +39,7 @@ script: - ./ci/travis-docker-fix - ./dev kind create - ./dev upgrade + - ./dev port-forward - ./dev test env: ## NOTE: The environment variables will be expanded to multiple jobs. 
For diff --git a/dev b/dev index f524e5c769..f5ac80961c 100755 --- a/dev +++ b/dev @@ -118,17 +118,16 @@ def kind_create(recreate): dotenv.set_key(".env", "Z2JH_KUBE_CONTEXT", kube_context) os.environ["Z2JH_KUBE_CONTEXT"] = kube_context - kube_namespace = os.environ.get("Z2JH_KUBE_NAMESPACE", "jh-dev") - if os.environ.get("Z2JH_KUBE_NAMESPACE", None) != kube_namespace: - print(f'Updating your .env file\'s Z2JH_KUBE_NAMESPACE value to "{kube_namespace}"') - dotenv.set_key(".env", "Z2JH_KUBE_NAMESPACE", kube_namespace) - os.environ["Z2JH_KUBE_NAMESPACE"] = kube_namespace + if not os.environ.get("Z2JH_KUBE_NAMESPACE", None): + print(f'Updating your .env file\'s Z2JH_KUBE_NAMESPACE value to "jh-dev"') + dotenv.set_key(".env", "Z2JH_KUBE_NAMESPACE", "jh-dev") + os.environ["Z2JH_KUBE_NAMESPACE"] = "jh-dev" print('Setting default namespace') _run([ "kubectl", "config", "set-context", "--current", - "--namespace", kube_namespace, + "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], ]) @@ -216,7 +215,7 @@ def kind_delete(): _run(["kind", "delete", "cluster", "--name", "jh-dev"]) -@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE", "PROXY_PUBLIC_SERVICE_PORT"]) +@depend_on(binaries=["chartpress", "helm"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE"]) def upgrade(values): print("Building images and updating image tags if needed.") _run(["chartpress"]) @@ -260,7 +259,7 @@ def upgrade(values): ]) -@depend_on(binaries=["kubectl"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE", "PROXY_PUBLIC_SERVICE_PORT"]) +@depend_on(binaries=["kubectl"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE"]) def port_forward(): print("Run and forget about port-forwarding.") _run( @@ -268,13 +267,14 @@ def port_forward(): "kubectl", "port-forward", "service/proxy-public", "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"], "--context", os.environ["Z2JH_KUBE_CONTEXT"], - f"{os.environ['PROXY_PUBLIC_SERVICE_PORT']}:80", + "--address", os.environ.get("Z2JH_PORT_FORWARD_ADDRESS", "localhost"), + f'{os.environ.get("Z2JH_PORT_FORWARD_PORT", "8080")}:80', ], forget=True, ) -@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE", "PROXY_PUBLIC_SERVICE_HOST", "PROXY_PUBLIC_SERVICE_PORT"]) +@depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE"]) def test(): _run( cmd=["pytest", "-v", "--exitfirst", "./tests"], @@ -291,15 +291,11 @@ def test(): ) -@depend_on(binaries=["helm", "yamllint", "kubeval"], envs=[]) +@depend_on(binaries=["helm", "yamllint", "kubeval"], envs=["VALIDATE_KUBE_VERSIONS"]) def check_templates(): - kubernetes_versions = None - kubernetes_versions = kubernetes_versions or os.environ.get("VALIDATE_KUBE_VERSIONS", None) - kubernetes_versions = kubernetes_versions or os.environ.get("KUBE_VERSION", None) - _run([ "python3", "tools/templates/lint-and-validate.py", - "--kubernetes-versions", kubernetes_versions, + "--kubernetes-versions", os.environ["VALIDATE_KUBE_VERSIONS"], ]) @@ -554,43 +550,47 @@ if __name__ == "__main__": if not os.path.exists(".env"): default_dotenv_file = textwrap.dedent( """\ - ## Environment variables loaded and used by the ./dev script. They - ## will take precedence over system variables. + # Environment variables loaded and used by the ./dev script. They + # will take precedence over system variables. 
+ # ----------------------------------------------------------------- + # + # GITHUB_ACCESS_TOKEN is needed to generate changelog entries etc. + # + GITHUB_ACCESS_TOKEN="" + # + # KUBECONFIG is required to be set explicitly in order to avoid + # potential modifications of non developer clusters. It should + # be to the path where the kubernetes config resides. + # + # The "./dev kind create" command will set this files KUBECONFIG + # entry automatically on cluster creation. + KUBECONFIG="" + # + # Z2JH_KUBE_CONTEXT and Z2JH_KUBE_NAMESPACE is used to ensure we + # work with the right cluster, with the right credentials, and in + # the right namespace without modifying the provided KUBECONFIG. + Z2JH_KUBE_CONTEXT="" + Z2JH_KUBE_NAMESPACE="" + + # KUBE_VERSION is used to create a kind cluster. Note that only + # versions that are found on kindest/node can be used. # - ## GITHUB_ACCESS_TOKEN is needed to generate changelog entries etc. - ## - GITHUB_ACCESS_TOKEN= + # ref: https://hub.docker.com/r/kindest/node/tags + KUBE_VERSION="1.15.3" # - ## KUBECONFIG is required to be set explicitly in order to avoid - ## potential modifications of non developer clusters. It should - ## be to the path where the kubernetes config resides. - ## - ## The "./dev kind create" command will set this files KUBECONFIG - ## entry automatically on cluster creation. - ## - KUBECONFIG= + # VALIDATE_KUBE_VERSIONS is influences "./dev check templates", + # what Kubernetes versions do we validate against? Note that only + # versions found on instrumenta/kubernetes-json-schema can be used. # - ## KUBE_VERSION is used to create a kind cluster and as a fallback - ## if you have not specified VALIDATE_KUBE_VERSIONS. Note that only - ## versions that are found on kindest/node can be used. - ## - ## ref: https://hub.docker.com/r/kindest/node/tags - ## - KUBE_VERSION=1.15.3 + # ref: https://github.com/instrumenta/kubernetes-json-schema + VALIDATE_KUBE_VERSIONS="1.15.0" # - ## VALIDATE_KUBE_VERSIONS is used when you check your Helm - ## templates. Are the generated Kubernetes resources valid - ## resources for these Kubernetes versions? - ## - VALIDATE_KUBE_VERSIONS=1.15.0 + # Z2JH_PORT_FORWARD_ADDRESS and Z2JH_PORT_FORWARD_PORT influences + # "./dev port-forward" and where "./dev test" will look to access + # the proxy-public Kubernetes service. # - ## PROXY_PUBLIC_SERVICE_HOST and PROXY_PUBLIC_SERVICE_PORT allow - ## you to run the tests if you have used kubectl to port forward - ## the proxy-public Kubernetes service manually with a custom - ## port or host ip. 
-            ##
-            PROXY_PUBLIC_SERVICE_HOST=127.0.0.1
-            PROXY_PUBLIC_SERVICE_PORT=8080
+            Z2JH_PORT_FORWARD_ADDRESS="localhost"
+            Z2JH_PORT_FORWARD_PORT="8080"
             """
         )
         with open('.env', 'w+') as f:
diff --git a/tests/conftest.py b/tests/conftest.py
index 35c483a94e..bbd705a4aa 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -15,8 +15,8 @@ def request_data():
     with open(os.path.join(basedir, "dev-config.yaml")) as f:
         y = yaml.safe_load(f)
     token = y["hub"]["services"]["test"]["apiToken"]
-    host = os.environ.get("PROXY_PUBLIC_SERVICE_HOST", "127.0.0.1")
-    port = os.environ.get("PROXY_PUBLIC_SERVICE_PORT", "8080")
+    host = os.environ.get("Z2JH_PORT_FORWARD_ADDRESS", "localhost")
+    port = os.environ.get("Z2JH_PORT_FORWARD_PORT", "8080")
     return {
         "token": token,
         "hub_url": f'http://{host}:{port}/hub/api',

From 30302eecd7ed2bf3294ac5a38c496a9252445d6b Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Mon, 14 Oct 2019 11:23:43 +0200
Subject: [PATCH 64/77] Converting comment to docstring

---
 tests/conftest.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index bbd705a4aa..66c23f09fb 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,8 @@
-## conftest.py has a special meaning to pytest
-## ref: https://docs.pytest.org/en/latest/writing_plugins.html#conftest-py-plugins
-##
+"""conftest.py has a special meaning to pytest
+
+ref: https://docs.pytest.org/en/latest/writing_plugins.html#conftest-py-plugins
+"""
+
 import os
 import requests
 import uuid

From c4e2acb8b8db77e8fa2fbb3601bc4bf23b2a2a7a Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Mon, 14 Oct 2019 15:13:52 +0200
Subject: [PATCH 65/77] Systematic use of port 8080

---
 Vagrantfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Vagrantfile b/Vagrantfile
index 056c233b98..488eebb917 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -13,7 +13,7 @@ Vagrant.configure("2") do |config|
     lv.cpus = 2
   end if Vagrant.has_plugin?('vagrant-libvirt')
 
-  config.vm.network "forwarded_port", guest: 8080, host: 8090
+  config.vm.network "forwarded_port", guest: 8080, host: 8080
 
   config.vm.provision "shell", path: "vagrant-vm-root-setup.sh"
   config.vm.synced_folder ".", "/home/vagrant/zero-to-jupyterhub-k8s"

From c9c7ad61770b4947d676a03182d22b559af5fa9f Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Tue, 15 Oct 2019 14:14:21 +0200
Subject: [PATCH 66/77] Py37 for Windows, make ./dev port-forwarding provide info

---
 .travis.yml         |  2 +-
 CONTRIBUTING.md     | 82 +++++++++++++++++++++++++++++++++++----------
 dev                 | 66 +++++++++++++++++++++++-------------
 tests/test_spawn.py | 42 ++++++++---------------
 4 files changed, 121 insertions(+), 71 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 1cd4fb94dd..0826631223 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
 dist: bionic
 language: python
 python:
-  - 3.6
+  - 3.7
 git:
   ## depth set to false overrides travis default behavior to use shallow clones
   ## with depth 50 that can cause issues
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 09d02af9e2..be04606315 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -82,7 +82,7 @@ This is what you need to install and make available on your PATH.
 - [helm](https://helm.sh/docs/using_helm/#installing-helm)
 - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
 - [kubeval](https://kubeval.instrumenta.dev/installation/)
-- Python 3.6+ ([Anaconda.com](https://www.anaconda.com/distribution/), [Python.org](https://www.python.org/downloads/))
+- Python 3.7+ ([Anaconda.com](https://www.anaconda.com/distribution/), [Python.org](https://www.python.org/downloads/))
 - Python dependencies installed
   - `dev-requirements.txt`
   - `doc/doc-requirements.txt`
@@ -151,14 +151,12 @@ own.
 
 #### a) Automated Helm chart install or upgrade
 
-1. Install or upgrade your local Helm chart
+1. Install or upgrade your local Helm chart.
 
    ```shell
   ./dev upgrade
   ```
 
-1. Visit http://localhost:8080
-
 #### b) Manual Helm chart install or upgrade
 
 1. Use [`chartpress`](https://github.com/jupyterhub/chartpress) to rebuild
@@ -179,29 +177,49 @@ own.
    helm upgrade jh-dev ./jupyterhub --install --namespace jh-dev
    ```
 
-1. Use `kubectl` to open up a network path to your cluster.
-
-   ```shell
-   kubectl port-forward --namespace jh-dev service/proxy-public 8080:80
-   ```
-
-1. Visit http://localhost:8080
+### 4: Setup network access
+
+In order for you to access JupyterHub and a spawned user server, you need to be
+able to access the Kubernetes service in this Helm chart called proxy-public.
+While pods in the cluster can do this easily, your computer isn't a pod in the
+cluster. What we can do is to dedicate a port on your computer to go towards the
+proxy-public service of the Kubernetes cluster using `kubectl port-forward`.
+
+When you run `kubectl port-forward` you will get a process that keeps running,
+and you need to open a new terminal window alongside it, unless you detach this
+process. The `./dev port-forward` script will detach the process, and respect
+two environment variables, `Z2JH_PORT_FORWARD_ADDRESS` and
+`Z2JH_PORT_FORWARD_PORT`, that you can set in the `.env` file.
+
+#### a) Using dev script
+
+```shell
+./dev port-forward
+```
+
+#### b) Using kubectl directly
+
+```shell
+kubectl port-forward --namespace jh-dev service/proxy-public 8080:80
+```
 
-### 4: Run tests
+### 5: Run tests
 
 To run the available tests, you can a) use the dev script or b) do it yourself
-with `pytest`. Using the dev script you will be presented with useful debugging
+with `pytest`. Using the dev script, you will be presented with useful debugging
 information if a test fails, and you will be required to explicitly declare what
-Kubernetes cluster to use in the `.env` file. This can help to avoid mistakenly
-working towards the wrong Kubernetes cluster.
+Kubernetes cluster to use in the `.env` file. This can help you avoid mistakenly
+working with the wrong Kubernetes cluster.
 
 > **NOTE:** If you haven't port-forwarded the `proxy-public` Kubernetes service
-> to port `8080` on localhost (`127.0.0.1`), you will need to set the
-> environment variables `PROXY_PUBLIC_SERVICE_HOST` and
-> `PROXY_PUBLIC_SERVICE_PORT` respectively. If you use the dev script, you can
-> set them in the `.env` file.
+> on `localhost` to port `8080` as is the default, you will need to set the
+> environment variables `Z2JH_PORT_FORWARD_ADDRESS` and `Z2JH_PORT_FORWARD_PORT`
+> respectively. If you run `./dev test`, you need to set them in the `.env`
+> file.
 
 #### a) Run tests with the dev script
 
 ```shell
 ./dev test
 ```
 
-#### b) Run test manually
+#### b) Run tests with pytest directly
 
 ```shell
 pytest -v --exitfirst ./tests
 ```
@@ -253,6 +271,34 @@ As you may notice, typical keywords associated with network errors are:
 - *timeout*
 - *no route to host*
 
+#### kind load docker-image issues
+
+This is an error I got on Ubuntu using docker version 18.06.1-ce. I upgraded to
+use a newer version of docker and has not experienced it since.
+
+```
+$ python3 ci/kind-load-docker-images.py --kind-cluster jh-dev
+Error: exit status 1
+`kind load docker-image --name jh-dev jupyterhub/k8s-hub:0.8.0_241-4be955c8` exited with status 1
+`python3 ci/kind-load-docker-images.py --kind-cluster jh-dev` errored (1)
+
+$ kind load docker-image --name jh-dev jupyterhub/k8s-hub:0.8.0_241-4be955c8
+Error: exit status 1
+
+$ kind load docker-image --name jh-dev jupyterhub/k8s-hub:0.8.0_241-4be955c8 --loglevel DEBUG
+DEBU[00:46:57] Running: /snap/bin/docker [docker image inspect -f {{ .Id }} jupyterhub/k8s-hub:0.8.0_241-4be955c8]
+DEBU[00:46:57] Running: /snap/bin/docker [docker ps -q -a --no-trunc --filter label=io.k8s.sigs.kind.cluster --format {{.Names}}\t{{.Label "io.k8s.sigs.kind.cluster"}}]
+DEBU[00:46:57] Running: /snap/bin/docker [docker ps -q -a --no-trunc --filter label=io.k8s.sigs.kind.cluster --format {{.Names}}\t{{.Label "io.k8s.sigs.kind.cluster"}} --filter label=io.k8s.sigs.kind.cluster=jh-dev]
+DEBU[00:46:57] Running: /snap/bin/docker [docker inspect -f {{index .Config.Labels "io.k8s.sigs.kind.role"}} jh-dev-control-plane]
+DEBU[00:46:57] Running: /snap/bin/docker [docker exec --privileged jh-dev-control-plane crictl inspecti jupyterhub/k8s-hub:0.8.0_241-4be955c8]
+DEBU[00:46:57] Image: "jupyterhub/k8s-hub:0.8.0_241-4be955c8" with ID "sha256:49a728c14a0f1d8cba40071f7bf2c173d03acd8c04fce828fea6b9dcb9805145" not present on node "jh-dev-control-plane"
+DEBU[00:46:57] Running: /snap/bin/docker [docker save -o /tmp/image-tar149196292/image.tar jupyterhub/k8s-hub:0.8.0_241-4be955c8]
+Error: exit status 1
+
+$ docker save -o /tmp/image-tar149196292/image.tar jupyterhub/k8s-hub:0.8.0_241-4be955c8
+failed to save image: unable to validate output path: directory "/tmp/image-tar149196292" does not exist
+```
+
 #### Unable to listen on port
 
 Did you get an error like this?
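The port-forward and test halves of this change only cooperate because both sides read the same two `Z2JH_PORT_FORWARD_*` variables with the same defaults. As a minimal, illustrative sketch (not part of the patch series), a helper along these lines mirrors what `tests/conftest.py` does after this change; the helper names and the `/hub/api` probe path are assumptions for the example:

```python
# Minimal sketch (not part of the patch): resolve the proxy-public URL the
# same way tests/conftest.py does after this change, then probe it.
import os

import requests


def service_url():
    # Same defaults the dev script and the tests agree on.
    host = os.environ.get("Z2JH_PORT_FORWARD_ADDRESS", "localhost")
    port = os.environ.get("Z2JH_PORT_FORWARD_PORT", "8080")
    return f"http://{host}:{port}"


def hub_is_reachable(timeout=1.0):
    """Return True if something answers on the forwarded port."""
    try:
        requests.get(f"{service_url()}/hub/api", timeout=timeout)
    except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
        return False
    return True


if __name__ == "__main__":
    print(service_url(), "reachable:", hub_is_reachable())
```

Keeping the defaults in one pair of variables is what lets `./dev port-forward` and `./dev test` be run independently without extra configuration.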
diff --git a/dev b/dev
index f5ac80961c..7016f26680 100755
--- a/dev
+++ b/dev
@@ -7,10 +7,12 @@ import functools
 import os
 import pipes
 import re
+import requests
 import shutil
 import subprocess
 import sys
 import textwrap
+import time
 
 import dotenv
 import colorama
@@ -59,7 +61,7 @@ def kind_create(recreate):
     # check for a existing cluster
     kind_clusters = _run(
         cmd=["kind", "get", "clusters"],
-        print_command=False,
+        print_command=True,
         capture_output=True,
     )
     kind_cluster_exist = bool(re.search(r"\bjh-dev\b", kind_clusters))
@@ -261,17 +263,36 @@ def upgrade(values):
 
 @depend_on(binaries=["kubectl"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE"])
 def port_forward():
-    print("Run and forget about port-forwarding.")
-    _run(
+    host = os.environ.get("Z2JH_PORT_FORWARD_ADDRESS", "localhost")
+    port = os.environ.get("Z2JH_PORT_FORWARD_PORT", "8080")
+    hub_api_url = f"http://{host}:{port}"
+
+    print("Run and detach a process to run the kubectl port-forward command.")
+    proc = _run(
         cmd=[
             "kubectl", "port-forward", "service/proxy-public",
             "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"],
             "--context", os.environ["Z2JH_KUBE_CONTEXT"],
-            "--address", os.environ.get("Z2JH_PORT_FORWARD_ADDRESS", "localhost"),
-            f'{os.environ.get("Z2JH_PORT_FORWARD_PORT", "8080")}:80',
+            "--pod-running-timeout", "1s",
+            "--address", host,
+            f'{port}:80',
         ],
-        forget=True,
-    )
+        detach=True,
+    )
+
+    try:
+        response = requests.get(hub_api_url, timeout=1.05)
+    except requests.exceptions.ConnectionError as e:
+        # this is a signature of a failed port forwarding
+        print("Port-forwarding failed!")
+        sys.exit(1)
+    except requests.exceptions.Timeout as e:
+        # this is a signature of successful port forwarding with services not responding
+        print("Port-forwarding seems to work but the proxy pod didn't respond quickly.")
+    else:
+        # this is a signature of a successful web response
+        print("Port-forwarding success!")
 
 
 @depend_on(binaries=["kubectl", "pytest"], envs=["KUBECONFIG", "Z2JH_KUBE_CONTEXT", "Z2JH_KUBE_NAMESPACE"])
@@ -428,27 +449,25 @@ def _print_command(text):
         colorama.Style.DIM
     )
 
-def _run(cmd, forget=False, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs):
+def _run(cmd, detach=False, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs):
     """Run a subcommand and exit if it fails"""
-    if kwargs.get("capture_output", None):
-        # FIXME: This is a workaround for Python 3.6 that won't be required in
-        # Python 3.7.
-        del kwargs["capture_output"]
-        kwargs["stdout"] = kwargs["stderr"] = subprocess.PIPE
+    if kwargs.get("capture_output", None) and kwargs.get("text", None) is None:
+        kwargs["text"] = True
 
     if print_command:
         _print_command(" ".join(map(pipes.quote, cmd)))
 
-    if forget:
-        # Call and forget this process
+    if detach:
+        # Call and detach the new process
         with open(os.devnull, 'r+b', 0) as DEVNULL:
             proc = subprocess.Popen(
                 cmd,
-                stdin=DEVNULL,
-                stdout=DEVNULL,
-                stderr=DEVNULL,
+                text=True,
+                stdin=subprocess.DEVNULL,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
                 close_fds=True,
             )
-        return
+        return proc
     else:
         # This call will await completion
         proc = subprocess.run(cmd, **kwargs)
@@ -461,16 +480,15 @@ def _run(cmd, forget=False, print_command=True, print_end="\n", print_error=True
             file=sys.stderr,
         )
         if proc.stderr:
-            print(proc.stderr.decode("utf-8").strip())
+            print(proc.stderr.strip())
         if error_callback:
             error_callback()
         if exit_on_error:
             sys.exit(proc.returncode)
 
     if proc.stdout:
-        return proc.stdout.decode("utf-8").strip()
-    elif kwargs.get("stdout", None):
-        return ""
+        return proc.stdout.strip()
+    return ""
 
 
 def _get_argparser():
@@ -509,7 +527,7 @@ def _get_argparser():
     )
 
     port_forward = _cmds.add_parser(
-        "port-forward", help="Start port-forwarding of the deployed Helm chart's proxy-public service in a detached process."
+        "port-forward", help="Run kubectl port-forward on the deployed Helm chart's proxy-public service in a detached process."
     )
 
     test = _cmds.add_parser(
diff --git a/tests/test_spawn.py b/tests/test_spawn.py
index 318d4728f6..640c811af2 100644
--- a/tests/test_spawn.py
+++ b/tests/test_spawn.py
@@ -166,36 +166,22 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data):
         allowed_url = "http://jupyter.org"
         blocked_url = "http://mybinder.org"
 
-        c = subprocess.run(
-            [
-                "kubectl",
-                "--namespace=jh-dev",
-                "exec",
-                pod_name,
-                "--",
-                "wget",
-                "--quiet",
-                "--tries=1",
-                "--timeout=5",
-                allowed_url,
-            ]
-        )
+        c = subprocess.run([
+            "kubectl", "exec", pod_name,
+            "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"],
+            "--context", os.environ["Z2JH_KUBE_CONTEXT"],
+            "--",
+            "wget", "--quiet", "--tries=1", "--timeout=3", allowed_url,
+        ])
         assert c.returncode == 0, "Unable to get allowed domain"
 
-        c = subprocess.run(
-            [
-                "kubectl",
-                "--namespace=jh-dev",
-                "exec",
-                pod_name,
-                "--",
-                "wget",
-                "--quiet",
-                "--tries=1",
-                "--timeout=5",
-                blocked_url,
-            ]
-        )
+        c = subprocess.run([
+            "kubectl", "exec", pod_name,
+            "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"],
+            "--context", os.environ["Z2JH_KUBE_CONTEXT"],
+            "--",
+            "wget", "--quiet", "--tries=1", "--timeout=3", blocked_url,
+        ])
         assert c.returncode > 0, "Blocked domain was allowed"
 
     finally:

From 9a0d4694b19ab76c4351d334e1323ed33f61fe7f Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 02:27:33 +0200
Subject: [PATCH 67/77] Return to Python 3.6 since it was troublesome with the VM

---
 .travis.yml     |  2 +-
 CONTRIBUTING.md |  7 +++++++
 dev             | 19 ++++++++++++-------
 3 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 0826631223..1cd4fb94dd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
 dist: bionic
 language: python
 python:
-  - 3.7
+  - 3.6
 git:
   ## depth set to false overrides travis default behavior to use shallow clones
   ## with depth 50 that can cause issues
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index be04606315..d84d14557c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -307,6 +307,13 @@ Did you get an error like this?
 Unable to listen on port 8080: Listeners failed to create with the following errors: [Unable to create listener: Error listen tcp4 127.0.0.1:8080: bind: address already in use Unable to create listener: Error listen tcp6 [::1]:8080: bind: address already in use]
 ```
 
+```
+Vagrant cannot forward the specified ports on this VM, since they
+would collide with some other application that is already listening
+on these ports. The forwarded port to 8080 is already in use
+on the host machine.
+```
+
 The key to solving this is understanding it!
 
diff --git a/dev b/dev
index 7016f26680..0676781391 100755
--- a/dev
+++ b/dev
@@ -273,7 +273,7 @@ def port_forward():
             "kubectl", "port-forward", "service/proxy-public",
             "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"],
             "--context", os.environ["Z2JH_KUBE_CONTEXT"],
-            "--pod-running-timeout", "1s",
+            "--pod-running-timeout", "10s",
             "--address", host,
             f'{port}:80',
         ],
@@ -281,7 +281,7 @@ def port_forward():
     )
 
     try:
-        response = requests.get(hub_api_url, timeout=1.05)
+        response = requests.get(hub_api_url, timeout=10.05)
     except requests.exceptions.ConnectionError as e:
         # this is a signature of a failed port forwarding
         print("Port-forwarding failed!")
@@ -451,8 +451,14 @@ def _print_command(text):
 
 def _run(cmd, detach=False, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs):
     """Run a subcommand and exit if it fails"""
-    if kwargs.get("capture_output", None) and kwargs.get("text", None) is None:
-        kwargs["text"] = True
+    if kwargs.get("capture_output", None):
+        if kwargs.get("text", None) is None:
+            kwargs["text"] = True
+
+        # FIXME: The following lines are a workaround that can be removed once
+        # we assume Python 3.7+, but they are required for Python 3.6.
+        del kwargs["capture_output"]
+        kwargs["stdout"] = kwargs["stderr"] = subprocess.PIPE
 
     if print_command:
         _print_command(" ".join(map(pipes.quote, cmd)))
@@ -461,7 +467,6 @@ def _run(cmd, detach=False, print_command=True, print_end="\n", print_error=True
         with open(os.devnull, 'r+b', 0) as DEVNULL:
             proc = subprocess.Popen(
                 cmd,
-                text=True,
                 stdin=subprocess.DEVNULL,
                 stdout=subprocess.DEVNULL,
                 stderr=subprocess.DEVNULL,
@@ -480,14 +485,14 @@ def _run(cmd, detach=False, print_command=True, print_end="\n", print_error=True
                 file=sys.stderr,
             )
         if proc.stderr:
-            print(proc.stderr.strip())
+            print(proc.stderr.decode("utf-8").strip())
         if error_callback:
             error_callback()
         if exit_on_error:
             sys.exit(proc.returncode)
 
     if proc.stdout:
-        return proc.stdout.strip()
+        return proc.stdout.decode("utf-8").strip()
     return ""

From 047daec132357649e8bab34b1c43a1ff48e7af8f Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 02:39:37 +0200
Subject: [PATCH 68/77] Update debugging notes of port clashes

---
 CONTRIBUTING.md | 51 +++++++++++++++++++++++------------------------
 dev             |  9 ++++++---
 2 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d84d14557c..5f25797fce 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -307,42 +307,43 @@ Did you get an error like this?
 Unable to listen on port 8080: Listeners failed to create with the following errors: [Unable to create listener: Error listen tcp4 127.0.0.1:8080: bind: address already in use Unable to create listener: Error listen tcp6 [::1]:8080: bind: address already in use]
 ```
-
 The key to solving this is understanding it!
 
 We need to shuttle traffic from your computer to your Kubernetes clusters's
 Service that in turn shuttle the traffic to the pod of relevance. While doing
-so, we can end up with issues like the one above if we end up asking for traffic
-to go to more than one place.
+so, we can end up with issues like the ones above. They arise because we have
+asked for traffic to go to more than one place.
 
 Let's look on how we need traffic to be shuttled!
 
-1. *Traffic entering your computer should go to your VM.*
+*Traffic entering your computer should go to your Kubernetes cluster's
+Service named `proxy-public`.*
 
-   When you run `vagrant up` your computer will read the
-   [Vagrantfile](Vagrantfile) and from that conclude it should shuttle traffic
-   incoming to your computer on port `8080` to your VM on port `8080`.
+When you run `./dev upgrade`, that in turn runs the `kubectl port-forward`
+command to shuttle traffic from port `8080` to the `proxy-public` Kubernetes
+Service (port `80`) that we want to communicate with; it is the gate to speak
+with the hub and proxy even though it is also possible to speak directly to
+the hub.
 
-2. *Traffic entering your VM should go to your Kubernetes cluster's Service named `proxy-public`.*
+Consider this example issue. Assume you set up a `kind` Kubernetes cluster on
+your local computer, and also let incoming traffic on `8080` go straight to this
+cluster using the `kubectl port-forward` command. What would happen if you start
+up a VM with `vagrant up`, and Vagrant was configured in the Vagrantfile to want
+traffic coming to your computer on `8080` to go towards it? Then you would have
+asked for traffic to go both to the Kubernetes cluster and to your VM. You would
+experience an error like the one below.
 
-   When you run `./dev upgrade`, that in turn runs the `kubectl port-forward`
-   command to shuttle traffic from port `8080` to the `proxy-public` Kubernetes
-   Service (port `80`) that we want to communicate with, it is the gate to speak
-   with the hub and proxy even though it is also possible to speak directly to
-   the hub.
-
-In short, the traffic is routed from computer (8080), to the VM (8080), to the
-Kubernetes `proxy-public` Service (80).
+```
+Vagrant cannot forward the specified ports on this VM, since they
+would collide with some other application that is already listening
+on these ports. The forwarded port to 8080 is already in use
+on the host machine.
+```
 
-The reason you may run into an issue if is there is another service already
-listening on traffic arriving on a given port. Then you would need to either
-shut it down or route traffic differently.
+To conclude: you may run into an issue like this if there is another service
+already listening on traffic arriving on a given port you want to use. Then you
+would need to either shut the blocking service down or route traffic
+differently.
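The collision scenario described above can also be checked for up front: before starting `kubectl port-forward`, test whether anything already listens on the local port you want. A small, illustrative sketch of such a check (not part of the patch series):

```python
# Illustrative sketch (not part of the patch): detect the "address already
# in use" situation described above before running kubectl port-forward.
import socket


def port_is_free(host="127.0.0.1", port=8080):
    """Return True if nothing is listening on host:port yet."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1)
        # connect_ex returns 0 when something accepts the connection,
        # i.e. the port is already taken by another listener.
        return s.connect_ex((host, port)) != 0


if __name__ == "__main__":
    if port_is_free():
        print("Port 8080 looks free; port-forwarding should be able to bind it.")
    else:
        print("Something already listens on 8080; stop it or pick another port.")
```

A check like this only tells you *that* the port is taken, not by what; `lsof -i :8080` or similar is still needed to find the blocking service.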
diff --git a/dev b/dev
index 0676781391..3414457431 100755
--- a/dev
+++ b/dev
@@ -265,7 +265,7 @@ def upgrade(values):
 def port_forward():
     host = os.environ.get("Z2JH_PORT_FORWARD_ADDRESS", "localhost")
     port = os.environ.get("Z2JH_PORT_FORWARD_PORT", "8080")
-    hub_api_url = f"http://{host}:{port}"
+    service_url = f"http://{host}:{port}"
 
     print("Run and detach a process to run the kubectl port-forward command.")
     proc = _run(
@@ -273,7 +273,7 @@ def port_forward():
             "kubectl", "port-forward", "service/proxy-public",
             "--namespace", os.environ["Z2JH_KUBE_NAMESPACE"],
             "--context", os.environ["Z2JH_KUBE_CONTEXT"],
-            "--pod-running-timeout", "10s",
+            "--pod-running-timeout", "1s",
             "--address", host,
             f'{port}:80',
         ],
@@ -281,7 +281,10 @@ def port_forward():
     )
 
     try:
-        response = requests.get(hub_api_url, timeout=10.05)
+        # Ensure there has been enough time for the detached port-forwarding
+        # process to establish a connection
+        time.sleep(1.05)
+        response = requests.get(service_url, timeout=1)
     except requests.exceptions.ConnectionError as e:
         # this is a signature of a failed port forwarding
         print("Port-forwarding failed!")

From a619310aade5028be85d2ab1284c5c7c3b44e08b Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 02:40:50 +0200
Subject: [PATCH 69/77] Get Python 3.6 right again

---
 dev | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/dev b/dev
index 3414457431..f0b479a227 100755
--- a/dev
+++ b/dev
@@ -455,9 +455,6 @@ def _print_command(text):
 def _run(cmd, detach=False, print_command=True, print_end="\n", print_error=True, error_callback=None, exit_on_error=True, **kwargs):
     """Run a subcommand and exit if it fails"""
     if kwargs.get("capture_output", None):
-        if kwargs.get("text", None) is None:
-            kwargs["text"] = True
-

From bc7f593a25c3a0fef14dfb06a4b83995e5e6d4e1 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 08:33:39 +0200
Subject: [PATCH 70/77] Prefix with Z2JH_ for our env vars

---
 .travis.yml | 16 +++++------
 ci/common   | 82 ++++++++++++++++++++++++++---------------------------
 dev         | 27 ++++++++++--------
 3 files changed, 64 insertions(+), 61 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 1cd4fb94dd..c9dcae73a6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -48,21 +48,21 @@ env:
     ## ref: https://docs.travis-ci.com/user/build-stages/#build-stages-and-build-matrix-expansion
     ##
     ##
-    ## KUBE_VERSION should match a released kindest/node image tag, but they are
-    ## currently not automatically published.
+    ## Z2JH_KUBE_VERSION should match a released kindest/node image tag, but they
+    ## are currently not automatically published.
     ##
     ## ref: https://hub.docker.com/r/kindest/node/tags
     ## ref: https://github.com/kubernetes-sigs/kind/issues/197
     ##
-    ## NOTE: KUBE_VERSION 1.16.1 is disabled until calcio has updated their
+    ## NOTE: Z2JH_KUBE_VERSION 1.16.1 is disabled until calico has updated their
     ## calico-etcd.yaml containing an old invalid DaemonSet's apiVersion.
     ##
     ## ref: https://github.com/projectcalico/calico/issues/2915
     ##
-    # - KUBE_VERSION=1.16.1 (Required bumped Helm version, but that version currently errors)
-    - KUBE_VERSION=1.15.3
-    - KUBE_VERSION=1.14.6
-    - KUBE_VERSION=1.13.10
+    # - Z2JH_KUBE_VERSION=1.16.1 (Required bumped Helm version, but that version currently errors)
+    - Z2JH_KUBE_VERSION=1.15.3
+    - Z2JH_KUBE_VERSION=1.14.6
+    - Z2JH_KUBE_VERSION=1.13.10
 
 jobs:
   ## include additional individual jobs
   ##
   include:
     - stage: lint and validate
       script:
       - setup_helm
       - setup_kubeval
       - ./dev check templates
       env:
-        - VALIDATE_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0,1.16.0
+        - Z2JH_VALIDATE_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0,1.16.0
     - stage: publish
       script:
       - setup_helm
diff --git a/ci/common b/ci/common
index 147d8abff5..d4aa4bd4b3 100755
--- a/ci/common
+++ b/ci/common
@@ -8,85 +8,85 @@ export PATH="$PWD/bin:$PATH"
 ## NOTE: These are default values for relevant environment variables
 ##
-if [ -z ${KUBE_VERSION:-} ]; then
-    ## NOTE: KUBE_VERSION is limited by the available kindest/node images
+if [ -z ${Z2JH_KUBE_VERSION:-} ]; then
+    ## NOTE: Z2JH_KUBE_VERSION is limited by the available kindest/node images
     ##
     ## ref: https://hub.docker.com/r/kindest/node/tags
     ## ref: https://github.com/kubernetes/kubernetes/releases
     ##
-    export KUBE_VERSION=1.15.3
+    export Z2JH_KUBE_VERSION=1.15.3
 fi
-if [ -z ${KIND_VERSION:-} ]; then
+if [ -z ${Z2JH_KIND_VERSION:-} ]; then
     ## ref: https://github.com/kubernetes-sigs/kind/releases
     ##
-    export KIND_VERSION=0.5.1
+    export Z2JH_KIND_VERSION=0.5.1
 fi
-if [ -z ${HELM_VERSION:-} ]; then
+if [ -z ${Z2JH_HELM_VERSION:-} ]; then
     ## ref: https://github.com/helm/helm/releases
     ##
     ## FIXME: Helm version 2.15.0-rc.1 errored with "Transport is closing",
     ##        but Kubernetes 1.16 requires a fix for tiller that is using the
     ##        apiVersion: extensions/v1beta1 still.
-    export HELM_VERSION=2.14.3
+    export Z2JH_HELM_VERSION=2.14.3
 fi
-if [ -z ${KUBEVAL_VERSION:-} ]; then
+if [ -z ${Z2JH_KUBEVAL_VERSION:-} ]; then
     ## ref: https://github.com/instrumenta/kubeval/releases
     ##
-    export KUBEVAL_VERSION=0.14.0
+    export Z2JH_KUBEVAL_VERSION=0.14.0
 fi
 
 ## NOTE: The setup_... functions cache downloads but ensure the correct version
 ##
 setup_kubectl () {
-    echo "setup kubectl ${KUBE_VERSION}"
-    if ! [ -f "bin/kubectl-${KUBE_VERSION}" ]; then
-        curl -sSLo "bin/kubectl-${KUBE_VERSION}" "https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl"
-        chmod +x "bin/kubectl-${KUBE_VERSION}"
+    echo "setup kubectl ${Z2JH_KUBE_VERSION}"
+    if ! [ -f "bin/kubectl-${Z2JH_KUBE_VERSION}" ]; then
+        curl -sSLo "bin/kubectl-${Z2JH_KUBE_VERSION}" "https://storage.googleapis.com/kubernetes-release/release/v${Z2JH_KUBE_VERSION}/bin/linux/amd64/kubectl"
+        chmod +x "bin/kubectl-${Z2JH_KUBE_VERSION}"
     fi
-    cp "bin/kubectl-${KUBE_VERSION}" bin/kubectl
+    cp "bin/kubectl-${Z2JH_KUBE_VERSION}" bin/kubectl
 }
 
 setup_kind () {
-    echo "setup kind ${KIND_VERSION}"
-    if ! [ -f "bin/kind-${KIND_VERSION}" ]; then
-        curl -sSLo "bin/kind-${KIND_VERSION}" "https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64"
-        chmod +x "bin/kind-${KIND_VERSION}"
+    echo "setup kind ${Z2JH_KIND_VERSION}"
+    if ! [ -f "bin/kind-${Z2JH_KIND_VERSION}" ]; then
+        curl -sSLo "bin/kind-${Z2JH_KIND_VERSION}" "https://github.com/kubernetes-sigs/kind/releases/download/v${Z2JH_KIND_VERSION}/kind-linux-amd64"
+        chmod +x "bin/kind-${Z2JH_KIND_VERSION}"
     fi
-    cp "bin/kind-${KIND_VERSION}" bin/kind
+    cp "bin/kind-${Z2JH_KIND_VERSION}" bin/kind
 }
 
 setup_helm () {
-    echo "setup helm ${HELM_VERSION}"
-    if ! [ -f "bin/helm-${HELM_VERSION}" ]; then
-        curl -sSLo "bin/helm-${HELM_VERSION}.tar.gz" "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
-        tar -xf "bin/helm-${HELM_VERSION}.tar.gz" --directory bin --strip-components 1 linux-amd64/helm
-        rm "bin/helm-${HELM_VERSION}.tar.gz"
-        mv bin/helm "bin/helm-${HELM_VERSION}"
+    echo "setup helm ${Z2JH_HELM_VERSION}"
+    if ! [ -f "bin/helm-${Z2JH_HELM_VERSION}" ]; then
+        curl -sSLo "bin/helm-${Z2JH_HELM_VERSION}.tar.gz" "https://storage.googleapis.com/kubernetes-helm/helm-v${Z2JH_HELM_VERSION}-linux-amd64.tar.gz"
+        tar -xf "bin/helm-${Z2JH_HELM_VERSION}.tar.gz" --directory bin --strip-components 1 linux-amd64/helm
+        rm "bin/helm-${Z2JH_HELM_VERSION}.tar.gz"
+        mv bin/helm "bin/helm-${Z2JH_HELM_VERSION}"
     fi
-    cp bin/helm-${HELM_VERSION} bin/helm
+    cp bin/helm-${Z2JH_HELM_VERSION} bin/helm
 }
 
 setup_kubeval () {
-    echo "setup kubeval ${KUBEVAL_VERSION}"
-    if ! [ -f "bin/kubeval-${KUBEVAL_VERSION}" ]; then
-        curl -sSLo "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" "https://github.com/instrumenta/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz"
-        tar -xf "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" --directory bin
-        rm "bin/kubeval-${KUBEVAL_VERSION}.tar.gz"
-        mv bin/kubeval "bin/kubeval-${KUBEVAL_VERSION}"
+    echo "setup kubeval ${Z2JH_KUBEVAL_VERSION}"
+    if ! [ -f "bin/kubeval-${Z2JH_KUBEVAL_VERSION}" ]; then
+        curl -sSLo "bin/kubeval-${Z2JH_KUBEVAL_VERSION}.tar.gz" "https://github.com/instrumenta/kubeval/releases/download/${Z2JH_KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz"
+        tar -xf "bin/kubeval-${Z2JH_KUBEVAL_VERSION}.tar.gz" --directory bin
+        rm "bin/kubeval-${Z2JH_KUBEVAL_VERSION}.tar.gz"
+        mv bin/kubeval "bin/kubeval-${Z2JH_KUBEVAL_VERSION}"
     fi
-    cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval
+    cp bin/kubeval-${Z2JH_KUBEVAL_VERSION} bin/kubeval
 }
 
 setup_git_crypt () {
-    GIT_CRYPT_VERSION=0.5.0
-    GIT_CRYPT_VERSION_SHA=46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b
-    echo "setup git-crypt ${GIT_CRYPT_VERSION}"
-    if ! [ -f "bin/git-crypt-${GIT_CRYPT_VERSION}" ]; then
-        curl -sSLo "bin/git-crypt-${GIT_CRYPT_VERSION}" https://github.com/minrk/git-crypt-bin/releases/download/${GIT_CRYPT_VERSION}/git-crypt
-        chmod +x "bin/git-crypt-${GIT_CRYPT_VERSION}"
-        echo "${GIT_CRYPT_VERSION_SHA} bin/git-crypt-${GIT_CRYPT_VERSION}" | shasum -a 256 -c -
+    Z2JH_GIT_CRYPT_VERSION=0.5.0
+    Z2JH_GIT_CRYPT_VERSION_SHA=46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b
+    echo "setup git-crypt ${Z2JH_GIT_CRYPT_VERSION}"
+    if ! [ -f "bin/git-crypt-${Z2JH_GIT_CRYPT_VERSION}" ]; then
+        curl -sSLo "bin/git-crypt-${Z2JH_GIT_CRYPT_VERSION}" https://github.com/minrk/git-crypt-bin/releases/download/${Z2JH_GIT_CRYPT_VERSION}/git-crypt
+        chmod +x "bin/git-crypt-${Z2JH_GIT_CRYPT_VERSION}"
+        echo "${Z2JH_GIT_CRYPT_VERSION_SHA} bin/git-crypt-${Z2JH_GIT_CRYPT_VERSION}" | shasum -a 256 -c -
     fi
-    cp bin/git-crypt-${GIT_CRYPT_VERSION} bin/git-crypt
+    cp bin/git-crypt-${Z2JH_GIT_CRYPT_VERSION} bin/git-crypt
 }
 
 if [ "${1:-}" = "--setup" ]; then
diff --git a/dev b/dev
index f0b479a227..9ce42c4b3a 100755
--- a/dev
+++ b/dev
@@ -56,7 +56,7 @@ def depend_on(binaries=[], envs=[]):
     return decorator_depend_on
 
-@depend_on(binaries=["kubectl", "kind", "docker"], envs=["KUBE_VERSION"])
+@depend_on(binaries=["kubectl", "kind", "docker"], envs=["Z2JH_KUBE_VERSION"])
 def kind_create(recreate):
     # check for a existing cluster
     kind_clusters = _run(
@@ -93,7 +93,7 @@ def kind_create(recreate):
     _run([
         "kind", "create", "cluster",
         "--name", "jh-dev",
-        "--image", f"kindest/node:v{os.environ['KUBE_VERSION']}",
+        "--image", f"kindest/node:v{os.environ['Z2JH_KUBE_VERSION']}",
         "--config", "ci/kind-config.yaml",
     ])
 
@@ -315,11 +315,11 @@ def test():
     )
 
-@depend_on(binaries=["helm", "yamllint", "kubeval"], envs=["VALIDATE_KUBE_VERSIONS"])
+@depend_on(binaries=["helm", "yamllint", "kubeval"], envs=["Z2JH_VALIDATE_KUBE_VERSIONS"])
 def check_templates():
     _run([
         "python3", "tools/templates/lint-and-validate.py",
-        "--kubernetes-versions", os.environ["VALIDATE_KUBE_VERSIONS"],
+        "--kubernetes-versions", os.environ["Z2JH_VALIDATE_KUBE_VERSIONS"],
     ])
 
@@ -578,9 +578,11 @@ if __name__ == "__main__":
             # -----------------------------------------------------------------
             #
             # GITHUB_ACCESS_TOKEN is needed to generate changelog entries etc.
+            # For private repositories you don't have to give this token any
+            # privileges when you create it here: https://github.com/settings/tokens/new
             #
             GITHUB_ACCESS_TOKEN=""
-            #
+
             # KUBECONFIG is required to be set explicitly in order to avoid
             # potential modifications of non developer clusters. It should
             # be set to the path where the kubernetes config resides.
@@ -588,26 +590,26 @@ if __name__ == "__main__":
             # The "./dev kind create" command will set this file's KUBECONFIG
             # entry automatically on cluster creation.
             KUBECONFIG=""
-            #
+
             # Z2JH_KUBE_CONTEXT and Z2JH_KUBE_NAMESPACE are used to ensure we
             # work with the right cluster, with the right credentials, and in
             # the right namespace without modifying the provided KUBECONFIG.
             Z2JH_KUBE_CONTEXT=""
             Z2JH_KUBE_NAMESPACE=""
 
-            # KUBE_VERSION is used to create a kind cluster. Note that only
+            # Z2JH_KUBE_VERSION is used to create a kind cluster. Note that only
             # versions that are found on kindest/node can be used.
             #
             # ref: https://hub.docker.com/r/kindest/node/tags
-            KUBE_VERSION="1.15.3"
-            #
-            # VALIDATE_KUBE_VERSIONS influences "./dev check templates",
+            Z2JH_KUBE_VERSION="1.15.3"
+
+            # Z2JH_VALIDATE_KUBE_VERSIONS influences "./dev check templates",
             # what Kubernetes versions do we validate against? Note that only
             # versions found on instrumenta/kubernetes-json-schema can be used.
             #
             # ref: https://github.com/instrumenta/kubernetes-json-schema
-            VALIDATE_KUBE_VERSIONS="1.15.0"
-            #
+            Z2JH_VALIDATE_KUBE_VERSIONS="1.15.0"
+
             # Z2JH_PORT_FORWARD_ADDRESS and Z2JH_PORT_FORWARD_PORT influence
             # "./dev port-forward" and where "./dev test" will look to access
             # the proxy-public Kubernetes service.
@@ -619,6 +621,7 @@ if __name__ == "__main__":
     with open('.env', 'w+') as f:
         f.write(default_dotenv_file)
 
+    dotenv.load_dotenv(override=True)
 
 # run suitable command and pass arguments

From 599cd9ced81ebbce860bcf707486b611aaede4be Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 08:43:47 +0200
Subject: [PATCH 71/77] Selective environment override

---
 dev | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/dev b/dev
index 9ce42c4b3a..8ea4f7b2d7 100755
--- a/dev
+++ b/dev
@@ -573,8 +573,11 @@ if __name__ == "__main__":
     if not os.path.exists(".env"):
         default_dotenv_file = textwrap.dedent(
             """\
-            # Environment variables loaded and used by the ./dev script. They
-            # will take precedence over system variables.
+            # Environment variables loaded and used by the ./dev script.
+            #
+            # Z2JH_ prefixed environment variables will take precedence if
+            # they are already defined in the system, while the others are
+            # required to be explicitly set here.
             # -----------------------------------------------------------------
             #
             # GITHUB_ACCESS_TOKEN is needed to generate changelog entries etc.
@@ -622,7 +625,9 @@ if __name__ == "__main__":
         with open('.env', 'w+') as f:
             f.write(default_dotenv_file)
 
-    dotenv.load_dotenv(override=True)
+    dotenv.load_dotenv(override=False)
+    os.environ["GITHUB_ACCESS_TOKEN"] = dotenv.get_key(".env", "GITHUB_ACCESS_TOKEN")
+    os.environ["KUBECONFIG"] = dotenv.get_key(".env", "KUBECONFIG")
 
 # run suitable command and pass arguments

From bc7f593a25c3a0fef14dfb06a4b83995e5e6d4e1 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 08:52:33 +0200
Subject: [PATCH 72/77] Add note about DNS failure

---
 tests/test_spawn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_spawn.py b/tests/test_spawn.py
index 640c811af2..1ad2ee5a23 100644
--- a/tests/test_spawn.py
+++ b/tests/test_spawn.py
@@ -173,7 +173,7 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data):
             "--",
             "wget", "--quiet", "--tries=1", "--timeout=3", allowed_url,
         ])
-        assert c.returncode == 0, "Unable to get allowed domain"
+        assert c.returncode == 0, "Unable to get allowed domain (or failed to resolve the domain name)"
 
         c = subprocess.run([
             "kubectl", "exec", pod_name,

From 752dc8320afd1b8398a49fcceb9c9a6a6d57c0f7 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 10:48:47 +0200
Subject: [PATCH 73/77] Add warning when non Z2JH_ prefixed envs are overridden

---
 CONTRIBUTING.md |  4 ++--
 dev             | 12 ++++++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5f25797fce..6db5d34003 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -273,8 +273,8 @@ As you may notice, typical keywords associated with network errors are:
 
 #### kind load docker-image issues
 
-This is an error I got on Ubuntu using docker version 18.06.1-ce. I upgraded to
-use a newer version of docker and has not experienced it since.
+This is an error experienced using docker version 18.06.1-ce, and it has not
+reoccurred since upgrading to 19.03.1-ce.
 
 ```
 $ python3 ci/kind-load-docker-images.py --kind-cluster jh-dev
 Error: exit status 1
 `kind load docker-image --name jh-dev jupyterhub/k8s-hub:0.8.0_241-4be955c8` exited with status 1
 `python3 ci/kind-load-docker-images.py --kind-cluster jh-dev` errored (1)
diff --git a/dev b/dev
index 8ea4f7b2d7..13d371933b 100755
--- a/dev
+++ b/dev
@@ -626,8 +626,16 @@ if __name__ == "__main__":
 
     dotenv.load_dotenv(override=False)
-    os.environ["GITHUB_ACCESS_TOKEN"] = dotenv.get_key(".env", "GITHUB_ACCESS_TOKEN")
-    os.environ["KUBECONFIG"] = dotenv.get_key(".env", "KUBECONFIG")
+
+    # let these environment values in the .env file override the system
+    # environment variables, but let the user know if an override happens
+    env_keys = ["GITHUB_ACCESS_TOKEN", "KUBECONFIG"]
+    for env_key in env_keys:
+        env_value_in_os = os.environ.get(env_key, None)
+        env_value_in_file = dotenv.get_key(".env", env_key)
+        if env_value_in_os and env_value_in_os != env_value_in_file:
+            print(f"Warning: system's {env_key} value is ignored in favor of the .env file's value.")
+        os.environ[env_key] = env_value_in_file
 
 # run suitable command and pass arguments

From 08e302aee69e501a9e14b207534d231e5b1c3139 Mon Sep 17 00:00:00 2001
From: Erik Sundell
Date: Thu, 17 Oct 2019 11:03:19 +0200
Subject: [PATCH 74/77] Cleanup and test k8s 1.16 with allow_failures

---
 .gitignore  |  1 -
 .travis.yml | 14 ++++++++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/.gitignore b/.gitignore
index 509d2f3d9d..f89c48fb0e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@ tools/templates/rendered-templates/
 bin/
 .vagrant/
 tools/github.sqlite
-ci/daemonset-calico-node.yaml
 
 .vscode
 
diff --git a/.travis.yml b/.travis.yml
index c9dcae73a6..bdef464c7a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -54,17 +54,19 @@ env:
     ## ref: https://hub.docker.com/r/kindest/node/tags
     ## ref: https://github.com/kubernetes-sigs/kind/issues/197
     ##
-    ## NOTE: Z2JH_KUBE_VERSION 1.16.1 is disabled until calico has updated their
-    ## calico-etcd.yaml containing an old invalid DaemonSet's apiVersion.
- ## - ## ref: https://github.com/projectcalico/calico/issues/2915 - ## - # - Z2JH_KUBE_VERSION=1.16.1 (Required bumped Helm version, but that version currently errors) + - Z2JH_KUBE_VERSION=1.16.1 Z2JH_HELM_VERSION=2.15.0-rc.2 - Z2JH_KUBE_VERSION=1.15.3 - Z2JH_KUBE_VERSION=1.14.6 - Z2JH_KUBE_VERSION=1.13.10 jobs: + ## allow a selection of the matrix of jobs to fail + ## + ## ref: https://docs.travis-ci.com/user/customizing-the-build/#rows-that-are-allowed-to-fail + ## + allow_failures: + - env: Z2JH_KUBE_VERSION=1.16.1 + ## include additional individual jobs ## include: From 5328d5a9a7b4b51720c5f2dc249bef1032144e56 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Thu, 17 Oct 2019 11:13:49 +0200 Subject: [PATCH 75/77] Optimize CI build order for speed --- .travis.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index bdef464c7a..d2f398e6c3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -54,18 +54,23 @@ env: ## ref: https://hub.docker.com/r/kindest/node/tags ## ref: https://github.com/kubernetes-sigs/kind/issues/197 ## - - Z2JH_KUBE_VERSION=1.16.1 Z2JH_HELM_VERSION=2.15.0-rc.2 - Z2JH_KUBE_VERSION=1.15.3 - Z2JH_KUBE_VERSION=1.14.6 - Z2JH_KUBE_VERSION=1.13.10 + - &allow_failure Z2JH_KUBE_VERSION=1.16.1 Z2JH_HELM_VERSION=2.15.0-rc.2 jobs: - ## allow a selection of the matrix of jobs to fail + ## allow experimental setups to fail ## ## ref: https://docs.travis-ci.com/user/customizing-the-build/#rows-that-are-allowed-to-fail ## allow_failures: - - env: Z2JH_KUBE_VERSION=1.16.1 + - env: *allow_failure + ## don't wait for the jobs that are allowed to fail to report success + ## + ## ref: https://docs.travis-ci.com/user/customizing-the-build/#fast-finishing + ## + fast_finish: true ## include additional individual jobs ## From 48bd4f47d50d3cb0852ef629e0fd8a16ced074f8 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Thu, 17 Oct 2019 18:34:13 +0200 Subject: [PATCH 76/77] Bump chartpress to 0.4.0 --- dev-requirements.txt | 2 +- jupyterhub/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index e5ad3af1a4..d1dd2176b7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -9,7 +9,7 @@ ## ## ref: https://github.com/jupyterhub/chartpress ## -chartpress==0.3.2 +chartpress>=0.4 ## pytest run tests that require requests, pytest is run from test ## script diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index 3b8087828c..78d494fe45 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: jupyterhub -version: '0.9.0-dev' +version: set-by-chartpress appVersion: 1.0.1dev description: Multi-user Jupyter installation home: https://z2jh.jupyter.org From a29b987b9abc245a29d6c85656adacf90589fcd7 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Thu, 17 Oct 2019 20:53:30 +0200 Subject: [PATCH 77/77] Bump chartpress to 0.4.1 --- dev-requirements.txt | 2 +- jupyterhub/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index d1dd2176b7..074f222dfd 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -9,7 +9,7 @@ ## ## ref: https://github.com/jupyterhub/chartpress ## -chartpress>=0.4 +chartpress>=0.4.1 ## pytest run tests that require requests, pytest is run from test ## script diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index 78d494fe45..98553633e9 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -1,6 +1,6 @@ 
apiVersion: v1 name: jupyterhub -version: set-by-chartpress +version: 0.0.1-set.by.chartpress appVersion: 1.0.1dev description: Multi-user Jupyter installation home: https://z2jh.jupyter.org
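A closing note on the Chart.yaml placeholder above: Helm expects a chart's `version` field to be valid SemVer 2, which `set-by-chartpress` is not, while `0.0.1-set.by.chartpress` parses as version 0.0.1 with the pre-release identifiers `set.by.chartpress` — presumably the reason chartpress 0.4.1 wants this form, since it only overwrites the field at build time. A quick sketch checking both placeholder styles against the semver.org reference pattern:

```python
# Sketch: check both placeholder styles against the semver.org reference
# regular expression (trimmed to a single pattern here).
import re

SEMVER_2 = re.compile(
    r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)"
    r"(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)"
    r"(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
    r"(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
)

for version in ["set-by-chartpress", "0.0.1-set.by.chartpress"]:
    verdict = "valid" if SEMVER_2.match(version) else "invalid"
    print(f"{version}: {verdict} SemVer 2")
# set-by-chartpress: invalid SemVer 2
# 0.0.1-set.by.chartpress: valid SemVer 2
```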