Test Scorecard in Github CI #480

Workflow file for this run

# Understanding the workflow file - https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions#understanding-the-workflow-file
name: Pre Submit # workflow name
on: # trigger events
  push:
    branches:
      - main
      - release-*
  pull_request:
    branches:
      - main
      - release-*
jobs: # jobs to run
  build:
    name: Test and build PRs
    runs-on: ubuntu-22.04 # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # fetch full git history
      - name: Run checks and unit tests
        run: make check
      - name: Build images
        run: make container-build-community
  scorecard-k8s:
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    env:
      IMAGE_REGISTRY: kind-registry:5000
      DEPLOY_NAMESPACE: k8s-test
      # see https://github.com/kubernetes-sigs/kind/tags
      KIND_VERSION: v0.22.0
      # see https://hub.docker.com/r/kindest/node/tags for available versions!
      K8S_VERSION: v1.27.11
      # https://github.com/operator-framework/operator-lifecycle-manager/releases
      OLM_VERSION: v0.27.0
      VERSION: 0.18.0
      PREVIOUS_VERSION: 0.17.0
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      # - name: Set up Go
      #   uses: actions/setup-go@v4
      #   with:
      #     go-version-file: go.mod
      # - name: Configure insecure registry
      #   run: |
      #     #sudo cat /etc/docker/daemon.json
      #     # allow insecure registry but keep original config!
      #     sudo bash -c "cat <<EOF >/etc/docker/daemon.json
      #     {
      #       \"exec-opts\": [\"native.cgroupdriver=cgroupfs\"],
      #       \"cgroup-parent\": \"/actions_job\",
      #       \"insecure-registries\" : [\"${IMAGE_REGISTRY}\"]
      #     }
      #     EOF"
      #     #sudo cat /etc/docker/daemon.json
      #     sudo systemctl restart docker
      #     # same for podman
      #     sudo bash -c "cat <<EOF >/etc/containers/registries.conf
      #     [[registry]]
      #     location=\"${IMAGE_REGISTRY}\"
      #     insecure=true
      #     EOF"
      #     #sudo cat /etc/containers/registries.conf
      - name: Start kind cluster
        uses: container-tools/kind-action@v2
        with:
          version: ${{env.KIND_VERSION}}
          config: ./hack/kind-config.yaml
          node_image: kindest/node:${{env.K8S_VERSION}}
          kubectl_version: ${{env.K8S_VERSION}}
          registry: true
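      # NOTE: the referenced ./hack/kind-config.yaml is not shown in this run view.
      # As a rough sketch (an assumption, not necessarily the repo's actual file),
      # a minimal kind config that wires the local registry into containerd could
      # look like this:
      #
      #   kind: Cluster
      #   apiVersion: kind.x-k8s.io/v1alpha4
      #   containerdConfigPatches:
      #     - |-
      #       [plugins."io.containerd.grpc.v1.cri".registry.mirrors."kind-registry:5000"]
      #         endpoint = ["http://kind-registry:5000"]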
      - name: Cluster info
        run: |
          kubectl version -o=yaml
          kubectl cluster-info
          kubectl get nodes -o=wide
      - name: Build bundle-community
        run: |
          PREVIOUS_VERSION=${{env.PREVIOUS_VERSION}} VERSION=${{env.VERSION}} make bundle-community
      - name: Run scorecard tests
        run: |
          bin/operator-sdk/*/operator-sdk scorecard -n default ./bundle
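      # By default, scorecard runs every test declared in the bundle's scorecard
      # config. If only a subset were wanted, a label selector could narrow it
      # down; a hypothetical invocation (the suite labels depend on this bundle's
      # scorecard config) might look like:
      #
      #   bin/operator-sdk/*/operator-sdk scorecard -n default ./bundle --selector=suite=olm --wait-time 120s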
      # - name: Deploy NHC + SNR
      #   run: |
      #     kubectl create ns ${DEPLOY_NAMESPACE}
      #     # deploy SNR first
      #     # HEADS UP: using a custom build of SNR here which uses `systemctl start kubelet`
      #     # as the "reboot" command; everything else does not work with kind.
      #     # See https://github.com/slintes/self-node-remediation/commit/a8aa0f73a240509cbd446fae048b15ed11a20eea
      #     operator-sdk run bundle -n ${DEPLOY_NAMESPACE} quay.io/slintes/self-node-remediation-operator-bundle:v0.1.0-start-kubelet
      #     # deploy NHC
      #     operator-sdk run bundle -n ${DEPLOY_NAMESPACE} --use-http ${IMAGE_REGISTRY}/node-healthcheck-operator-bundle:latest
      #     # give OLM some time to create the CSV etc.
      #     sleep 1m
      # - name: OLM status
      #   if: ${{ always() }}
      #   run: |
      #     kubectl get -A OperatorGroup -o wide
      #     kubectl get -A CatalogSource -o wide
      #     kubectl get -A Subscription -o wide
      #     kubectl get -A ClusterServiceVersion -o wide
      #     kubectl get -A InstallPlan -o wide
      # - name: Wait for deployments
      #   run: |
      #     kubectl wait deployment -n ${DEPLOY_NAMESPACE} self-node-remediation-controller-manager --for condition=Available=True --timeout=300s
      #     # give SNR some time to create the DaemonSet
      #     sleep 30s
      #     kubectl rollout status daemonset -n ${DEPLOY_NAMESPACE} self-node-remediation-ds --timeout 300s
      #     kubectl wait deployment -n ${DEPLOY_NAMESPACE} node-healthcheck-controller-manager --for condition=Available=True --timeout=300s
      # - name: Deployment status
      #   if: ${{ always() }}
      #   run: |
      #     kubectl -n ${DEPLOY_NAMESPACE} get deployments,daemonsets,pods -o=wide
      # - name: Run NHC e2e
      #   run: |
      #     echo "running e2e test"
      #     OPERATOR_NS=${DEPLOY_NAMESPACE} SNR_STRATEGY=OutOfServiceTaint LABEL_FILTER="!OCP-ONLY" make test-e2e
      #     echo "finished e2e test"
      # - name: Debug
      #   if: ${{ failure() }}
      #   run: |
      #     # debug NHC
      #     echo "Debugging NHC"
      #     kubectl describe deployment -n ${DEPLOY_NAMESPACE} node-healthcheck-controller-manager
      #     echo -e "\n\n"
      #     kubectl describe pod -n ${DEPLOY_NAMESPACE} --selector=app.kubernetes.io/name=node-healthcheck-operator,app.kubernetes.io/component=controller-manager
      #     echo -e "\n\n"
      #     kubectl logs -n ${DEPLOY_NAMESPACE} -c manager --selector=app.kubernetes.io/name=node-healthcheck-operator,app.kubernetes.io/component=controller-manager --tail=-1
      #     echo -e "\n\n"
      #     echo "Debugging SNR operator"
      #     kubectl describe deployment -n ${DEPLOY_NAMESPACE} self-node-remediation-controller-manager
      #     echo -e "\n\n"
      #     kubectl describe pod -n ${DEPLOY_NAMESPACE} --selector=self-node-remediation-operator=,control-plane=controller-manager
      #     echo -e "\n\n"
      #     kubectl logs -n ${DEPLOY_NAMESPACE} -c manager --selector=self-node-remediation-operator=,control-plane=controller-manager --tail=-1
      #     echo -e "\n\n"
      #     echo "Debugging SNR agents"
      #     kubectl describe daemonset -n ${DEPLOY_NAMESPACE} self-node-remediation-ds
      #     echo -e "\n\n"
      #     kubectl describe pod -n ${DEPLOY_NAMESPACE} --selector=app.kubernetes.io/name=self-node-remediation,app.kubernetes.io/component=agent
      #     echo -e "\n\n"
      #     kubectl logs -n ${DEPLOY_NAMESPACE} --selector=app.kubernetes.io/name=self-node-remediation,app.kubernetes.io/component=agent --tail=-1