Skip to content

Commit

Permalink
Test PR for Integration test
Browse files Browse the repository at this point in the history
  • Loading branch information
rrajendran17 committed Jan 31, 2025
1 parent b3ad6a1 commit 5f88993
Show file tree
Hide file tree
Showing 7 changed files with 413 additions and 0 deletions.
173 changes: 173 additions & 0 deletions .github/workflows/basic-ci.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
name: Harvester-Network-Controller CI

# synchronize keeps CI running on every new commit pushed to an open PR.
on:
  push:
    branches:
      - master
      - 'v**'
  pull_request:
    types: [opened, reopened, synchronize]

env:
  LIBVIRT_DEFAULT_URI: "qemu:///system"
  # Quoted so the value is the literal string "false"; the log/cleanup
  # steps below compare it against 'true'.
  VM_DEPLOYED: "false"

jobs:
  # Cheap lint/validation gate before spending time on VM-based jobs.
  validation:
    runs-on:
      - self-hosted
      - golang
    steps:
      - name: "Clone and check"
        uses: actions/checkout@v3
      - name: "Run validations"
        run: |
          make validate
          make validate-ci

  # Fresh-install path: build the image, bring up a vagrant k3s cluster,
  # deploy the locally built controller, then run the integration tests.
  job-new-installation:
    needs: validation
    runs-on:
      - self-hosted
      - golang
    steps:
      - name: "Clone and check"
        uses: actions/checkout@v3
      - name: "Build the Image for the Integration Test"
        run: |
          BUILD_FOR_CI=true make
          ./ci/scripts/patch-ttl-repo.sh
          echo "NCH override result as below:"
          cat ci/charts/nch-override.yaml
      - name: "Local Deployment (Harvester+NetworkController) for testing"
        id: vm_deploy
        run: |
          rm -rf nch-new-vagrant-k3s
          git clone https://github.com/bk201/vagrant-k3s nch-new-vagrant-k3s
          pushd nch-new-vagrant-k3s
          yq e -i ".cluster_size = 1" settings.yaml
          ./new-cluster.sh
          # Mark the VM as deployed only after new-cluster.sh succeeds, so
          # the always() cleanup steps know whether there is anything to
          # tear down.
          echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
          yq e -i ".longhorn_version = \"1.7.1\"" settings.yaml
          ./scripts/deploy_longhorn.sh
          popd
      - name: "Patch Image target"
        run: |
          ./ci/scripts/patch-ttl-repo.sh
          echo "NCH override result as below:"
          cat ci/charts/nch-override.yaml
      - name: "Deploy NCH"
        run: |
          pushd nch-new-vagrant-k3s
          cp ../ci/scripts/deploy_nch_current.sh ./deploy_nch_current.sh
          cp ../ci/charts/nch-override.yaml ./nch-override.yaml
          ./deploy_nch_current.sh
          popd
      - name: "Add disk"
        run: |
          pushd nch-new-vagrant-k3s
          ./scripts/attach-disk.sh node1 nch-new-vagrant-k3s
          sleep 30
          popd
      - name: "Run Basic Test"
        id: basic-test
        run: |
          pushd nch-new-vagrant-k3s
          vagrant ssh-config node1 > ../ssh-config
          cp kubeconfig ../kubeconfig
          popd
          echo "Running integration tests"
          NCH_HOME=$(pwd) go test -v ./tests/...
      - name: "Get NCH logs"
        if: always()
        run: |
          # Quoted expansion guards the test if the env var is ever empty.
          if [ "${{ env.VM_DEPLOYED }}" != 'true' ]; then
            echo "VM is not deployed, skip getting logs"
            exit 0
          fi
          ./ci/scripts/get-debug-info.sh
      - name: "Tear Down / Cleanup"
        if: always()
        run: |
          if [ "${{ env.VM_DEPLOYED }}" != 'true' ]; then
            echo "VM is not deployed, skip VM destroy"
            exit 0
          fi
          rm -rf /tmp/network-controller/nch-new-vagrant-k3s
          pushd nch-new-vagrant-k3s
          vagrant destroy -f --parallel
          popd

  # Upgrade path: install the released chart first, then upgrade to the
  # locally built image and re-run the tests.
  jobs-upgrade:
    needs: validation
    runs-on:
      - self-hosted
      - golang
    steps:
      - name: "Clone and check"
        uses: actions/checkout@v3
      - name: "Build the Image for the Integration Test"
        run: |
          BUILD_FOR_CI=true make
          ./ci/scripts/patch-ttl-repo.sh
          echo "NCH override result as below:"
          cat ci/charts/nch-override.yaml
      - name: "Local Deployment (Harvester+NetworkController) for testing"
        id: vm_deploy
        run: |
          rm -rf nch-upgrade-vagrant-k3s
          git clone https://github.com/bk201/vagrant-k3s nch-upgrade-vagrant-k3s
          pushd nch-upgrade-vagrant-k3s
          yq e -i ".cluster_size = 1" settings.yaml
          ./new-cluster.sh
          echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
          yq e -i ".longhorn_version = \"1.7.1\"" settings.yaml
          ./scripts/deploy_longhorn.sh
          # Install the released upstream chart; the later "Upgrade NCH"
          # step upgrades it to the image built in this run.
          cp ../ci/scripts/deploy_nch_chart.sh ./deploy_nch_chart.sh
          ./deploy_nch_chart.sh
          popd
      - name: "Add disk"
        run: |
          pushd nch-upgrade-vagrant-k3s
          ./scripts/attach-disk.sh node1 nch-upgrade-vagrant-k3s
          sleep 30
          popd
      - name: "Patch Image target (for upgrade)"
        run: |
          ./ci/scripts/patch-ttl-repo.sh
          echo "NCH override result as below:"
          cat ci/charts/nch-override.yaml
      - name: "Upgrade NCH"
        run: |
          pushd nch-upgrade-vagrant-k3s
          cp ../ci/scripts/upgrade_nch.sh ./upgrade_nch.sh
          ./upgrade_nch.sh
          popd
      - name: "Run Basic Test"
        id: basic-test
        run: |
          pushd nch-upgrade-vagrant-k3s
          vagrant ssh-config node1 > ../ssh-config
          cp kubeconfig ../kubeconfig
          popd
          echo "Running integration tests"
          NCH_HOME=$(pwd) go test -v ./tests/...
      - name: "Get NCH logs"
        if: always()
        run: |
          if [ "${{ env.VM_DEPLOYED }}" != 'true' ]; then
            echo "VM is not deployed, skip getting logs"
            exit 0
          fi
          ./ci/scripts/get-debug-info.sh
      - name: "Tear Down / Cleanup"
        if: always()
        run: |
          if [ "${{ env.VM_DEPLOYED }}" != 'true' ]; then
            echo "VM is not deployed, skip VM destroy"
            exit 0
          fi
          rm -rf /tmp/network-controller/nch-upgrade-vagrant-k3s
          pushd nch-upgrade-vagrant-k3s
          vagrant destroy -f --parallel
          popd
17 changes: 17 additions & 0 deletions ci/charts/nch-override.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Helm value overrides used by CI to point the chart at locally built images.
image:
  repository: rancher/harvester-network-controller
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

# NOTE(review): patch-ttl-repo.sh writes .webhook.image.repository/.tag, which
# does not match the flat keys declared here — confirm which shape the chart's
# templates actually read, and align the script and this file.
webhook:
  repository: rancher/harvester-network-webhook
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

# Quoted so the glob is unambiguously a string to every YAML parser.
# NOTE(review): autoProvisionFilter and injectUdevMonitorError look inherited
# from the node-disk-manager chart — confirm the network-controller chart
# consumes them.
autoProvisionFilter: ["/dev/sd*"]
debug: true

# we only manually inject udev monitor error once, so we can test it in CI.
injectUdevMonitorError: true
63 changes: 63 additions & 0 deletions ci/scripts/deploy_nch_chart.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
#!/bin/bash -e

# Deploy the released harvester-network-controller chart from
# charts.harvesterhci.io into the k3s cluster whose kubeconfig sits next to
# this script, then wait for its pods to come up.

TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

# Echo 0 if command $1 is available on PATH, 1 otherwise.
ensure_command() {
  local cmd=$1
  if ! command -v "$cmd" &> /dev/null; then
    echo 1
    return
  fi
  echo 0
}

# Wait until the number of Running network-controller pods matches the
# cluster size. Retries with a timeout instead of failing on the first
# check: the pods are created by the helm install just before this runs.
ensure_network_controller_ready() {
  local retries=0
  while true; do
    running_num=$(kubectl get pods -n harvester-system |grep ^network-controller |grep Running |awk '{print $3}' |wc -l)
    if [[ $running_num -eq ${cluster_nodes} ]]; then
      echo "network-controller pods are ready!"
      break
    fi
    if [[ $retries -ge 30 ]]; then
      echo "check network-controller pods failure."
      exit 1
    fi
    echo "network-controller pods not ready yet, retrying..."
    sleep 10
    retries=$((retries + 1))
  done
}

if [ ! -f "$TOP_DIR/kubeconfig" ]; then
  echo "kubeconfig does not exist. Please create cluster first."
  echo "Maybe try new_cluster.sh"
  exit 1
fi
echo "$TOP_DIR/kubeconfig"
export KUBECONFIG=$TOP_DIR/kubeconfig

# Use helm from PATH if present; otherwise fetch a pinned release.
if [[ $(ensure_command helm) -eq 1 ]]; then
  echo "no helm, try to curl..."
  curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
  tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
  HELM=$TOP_DIR/linux-amd64/helm
  $HELM version
else
  echo "Get helm, version info as below"
  HELM=$(command -v helm)
  $HELM version
fi

cluster_nodes=$(yq -e e '.cluster_size' "$TOP_DIR/settings.yaml")
echo "cluster nodes: $cluster_nodes"

pushd "$TOP_DIR"

# ">" (not ">>") keeps the default file idempotent across re-runs.
cat > nch-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f nch-override.yaml ]; then
  mv nch-override.yaml.default nch-override.yaml
fi

$HELM pull harvester-network-controller --repo https://charts.harvesterhci.io --untar
$HELM install -f "$TOP_DIR/nch-override.yaml" harvester-network-controller ./harvester-network-controller --create-namespace -n harvester-system

# The pods only exist after the install above, so verify readiness here
# (the original checked before installing, which always failed on a fresh
# cluster because the single-pass loop exited 1 immediately).
ensure_network_controller_ready

popd
65 changes: 65 additions & 0 deletions ci/scripts/deploy_nch_current.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
#!/bin/bash -e

# Deploy the harvester-network-controller chart from the local checkout,
# using the CI-patched nch-override.yaml (ttl.sh image), then wait for the
# controller pods to come up.

TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

# Echo 0 if command $1 is available on PATH, 1 otherwise.
ensure_command() {
  local cmd=$1
  if ! command -v "$cmd" &> /dev/null; then
    echo 1
    return
  fi
  echo 0
}

# Wait until the number of Running network-controller pods matches the
# cluster size. Retries with a timeout instead of failing on the first
# check: the pods are created by the helm install just before this runs.
ensure_network_controller_ready() {
  local retries=0
  while true; do
    running_num=$(kubectl get pods -n harvester-system |grep ^network-controller |grep Running |awk '{print $3}' |wc -l)
    if [[ $running_num -eq ${cluster_nodes} ]]; then
      echo "network-controller pods are ready!"
      break
    fi
    if [[ $retries -ge 30 ]]; then
      echo "check network-controller failure."
      exit 1
    fi
    echo "network-controller pods not ready yet, retrying..."
    sleep 10
    retries=$((retries + 1))
  done
}

if [ ! -f "$TOP_DIR/kubeconfig" ]; then
  echo "kubeconfig does not exist. Please create cluster first."
  echo "Maybe try new_cluster.sh"
  exit 1
fi
echo "$TOP_DIR/kubeconfig"
export KUBECONFIG=$TOP_DIR/kubeconfig

# Use helm from PATH if present; otherwise fetch a pinned release.
if [[ $(ensure_command helm) -eq 1 ]]; then
  echo "no helm, try to curl..."
  curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
  tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
  HELM=$TOP_DIR/linux-amd64/helm
  $HELM version
else
  echo "Get helm, version info as below"
  HELM=$(command -v helm)
  $HELM version
fi

cluster_nodes=$(yq -e e '.cluster_size' "$TOP_DIR/settings.yaml")
echo "cluster nodes: $cluster_nodes"

pushd "$TOP_DIR"

# ">" (not ">>") keeps the default file idempotent across re-runs.
cat > nch-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f nch-override.yaml ]; then
  mv nch-override.yaml.default nch-override.yaml
fi

# Copy the chart out of the repo checkout. The original copied
# ../deploy/charts/harvester-node-disk-manager (and misspelled the release
# as "harvester-network-cntroller") — leftovers from the node-disk-manager
# repo this script was adapted from.
# NOTE(review): confirm the chart path in this repo's deploy/charts/.
cp -r ../deploy/charts/harvester-network-controller harvester-network-controller

target_img=$(yq -e .image.repository nch-override.yaml)
echo "install target image: ${target_img}"
$HELM install -f "$TOP_DIR/nch-override.yaml" harvester-network-controller ./harvester-network-controller --create-namespace -n harvester-system

# The pods only exist after the install above, so verify readiness here
# (the original checked before installing, which always failed on a fresh
# cluster because the single-pass loop exited 1 immediately).
ensure_network_controller_ready

popd
10 changes: 10 additions & 0 deletions ci/scripts/get-debug-info.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/bin/bash -e

# Best-effort dump of the network-controller pod logs on the target node,
# for CI debugging after a test run.

TARGETNODE="node1"

export KUBECONFIG=kubeconfig

# Pick the controller pod (not the webhook) scheduled on $TARGETNODE.
# "|| true" keeps -e from aborting when grep matches nothing; the original
# grepped the misspelled prefix "harvester-network-cntroller".
NCHPOD=$(kubectl get pods -n harvester-system --field-selector spec.nodeName=$TARGETNODE | grep ^harvester-network-controller | grep -v webhook | awk '{print $1}' || true)

if [ -z "$NCHPOD" ]; then
  echo "no harvester-network-controller pod found on $TARGETNODE, skip log dump"
  exit 0
fi

# filter out the redundant Skip log ("|| true": grep exits 1 when every
# line is filtered, which must not fail this best-effort script)
kubectl logs "$NCHPOD" -n harvester-system | grep -v Skip || true
10 changes: 10 additions & 0 deletions ci/scripts/patch-ttl-repo.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/bin/bash -e

# Point ci/charts/nch-override.yaml at the ttl.sh images built for the
# current commit. ttl.sh tags double as the image TTL, hence "1h".

COMMIT=$(git rev-parse --short HEAD)
IMAGE=ttl.sh/network-controller-harvester-${COMMIT}
# The original pushed "node-disk-manager-webhook" here — a copy-paste
# leftover; the override file declares rancher/harvester-network-webhook.
# NOTE(review): confirm the webhook image name produced by BUILD_FOR_CI=true make.
IMAGE_WEBHOOK=ttl.sh/harvester-network-webhook-${COMMIT}

yq e -i ".image.repository = \"${IMAGE}\"" ci/charts/nch-override.yaml
yq e -i ".image.tag = \"1h\"" ci/charts/nch-override.yaml
# Patch the flat webhook.repository/webhook.tag keys that the override file
# actually defines (the original wrote .webhook.image.*, creating unused keys).
yq e -i ".webhook.repository = \"${IMAGE_WEBHOOK}\"" ci/charts/nch-override.yaml
yq e -i ".webhook.tag = \"1h\"" ci/charts/nch-override.yaml
Loading

0 comments on commit 5f88993

Please sign in to comment.