From fb53aaebc699e9e163132ef5a131adc7b39282da Mon Sep 17 00:00:00 2001
From: Patryk Diak
Date: Mon, 15 Apr 2024 10:32:15 +0200
Subject: [PATCH] NetworkDiagnosticsConfig tests

Signed-off-by: Patryk Diak
---
 .../networking/network_diagnostics.go        | 289 ++++++++++++++++++
 .../generated/zz_generated.annotations.go    |  12 +
 zz_generated.manifests/test-reporting.yaml   |  14 +
 3 files changed, 315 insertions(+)
 create mode 100644 test/extended/networking/network_diagnostics.go

diff --git a/test/extended/networking/network_diagnostics.go b/test/extended/networking/network_diagnostics.go
new file mode 100644
index 000000000000..a2ab30936db0
--- /dev/null
+++ b/test/extended/networking/network_diagnostics.go
@@ -0,0 +1,289 @@
+package networking
+
+import (
+	"context"
+	"time"
+
+	v1 "github.com/openshift/api/operator/v1"
+	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	configv1 "github.com/openshift/api/config/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/test/e2e/framework"
+
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+const (
+	namespace     = "openshift-network-diagnostics"
+	clusterConfig = "cluster"
+	condition     = "NetworkDiagnosticsAvailable"
+	fieldManager  = "network-diagnostics-e2e"
+)
+
+var _ = g.Describe("[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig]", func() {
+	defer g.GinkgoRecover()
+	oc := exutil.NewCLIWithoutNamespace("network-diagnostics")
+
+	g.AfterEach(func(ctx context.Context) {
+		// Reset network diagnostics config
+		netConfigApply := applyconfigv1.Network(clusterConfig).WithSpec(
+			applyconfigv1.NetworkSpec().WithNetworkDiagnostics(nil),
+		)
+		_, err := oc.AdminConfigClient().ConfigV1().Networks().Apply(ctx, netConfigApply,
+			metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
+		o.Expect(err).NotTo(o.HaveOccurred())
+	})
+
+	g.It("Should be enabled by default", func(ctx context.Context) {
+		o.Eventually(func() bool {
+			g.By("running one network-check-source pod")
+			srcPods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+				LabelSelector: "app=network-check-source"})
+			if err != nil {
+				framework.Logf("Error getting pods in %s namespace: %v", namespace, err)
+				return false
+			}
+			if len(srcPods.Items) != 1 {
+				framework.Logf("Invalid amount of source pods: %d", len(srcPods.Items))
+				return false
+			}
+
+			g.By("running a network-check-target pod on every node")
+			targetPods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+				LabelSelector: "app=network-check-target"})
+			if err != nil {
+				framework.Logf("Error getting pods in %s namespace: %v", namespace, err)
+				return false
+			}
+			nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+			if err != nil {
+				framework.Logf("Error getting nodes: %v", err)
+				return false
+			}
+			if len(targetPods.Items) != len(nodes.Items) {
+				framework.Logf("Invalid amount of destination pods want:%d, got: %d", len(nodes.Items), len(targetPods.Items))
+				return false
+			}
+
+			cfg, err := oc.AdminConfigClient().ConfigV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster config: %v", err)
+				return false
+			}
+			return meta.IsStatusConditionTrue(cfg.Status.Conditions, condition)
+		}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	})
+
+	g.It("Should remove all network diagnostics pods when disabled", func(ctx context.Context) {
+		netConfigApply := applyconfigv1.Network(clusterConfig).WithSpec(
+			applyconfigv1.NetworkSpec().WithNetworkDiagnostics(
+				applyconfigv1.NetworkDiagnostics().WithMode(configv1.NetworkDiagnosticsDisabled),
+			),
+		)
+		_, err := oc.AdminConfigClient().ConfigV1().Networks().Apply(ctx, netConfigApply,
+			metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		o.Eventually(func() bool {
+			pods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
+			if err != nil {
+				framework.Logf("Error getting pods in %s namespace: %v", namespace, err)
+				return false
+			}
+			if len(pods.Items) != 0 {
+				return false
+			}
+
+			cfg, err := oc.AdminConfigClient().ConfigV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster config: %v", err)
+				return false
+			}
+			return meta.IsStatusConditionFalse(cfg.Status.Conditions, condition)
+		}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	})
+
+	g.It("Should move the source diagnostics pods based on the new selector and tolerations", func(ctx context.Context) {
+		// Intentionally omit setting the mode to ensure that the diagnostics are enabled when it is unset
+		netConfigApply := applyconfigv1.Network(clusterConfig).WithSpec(
+			applyconfigv1.NetworkSpec().WithNetworkDiagnostics(
+				applyconfigv1.NetworkDiagnostics().
+					WithSourcePlacement(
+						applyconfigv1.NetworkDiagnosticsSourcePlacement().
+							WithNodeSelector(map[string]string{"node-role.kubernetes.io/master": ""}).
+							WithTolerations(corev1.Toleration{
+								Operator: corev1.TolerationOpExists,
+							}),
+					),
+			),
+		)
+		_, err := oc.AdminConfigClient().ConfigV1().Networks().Apply(ctx, netConfigApply,
+			metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Eventually(func() bool {
+			pods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+				LabelSelector: "app=network-check-source"})
+			if err != nil {
+				framework.Logf("Error getting pods in %s namespace: %v", namespace, err)
+				return false
+			}
+			if len(pods.Items) == 0 {
+				framework.Logf("No diagnostics pods found")
+				return false
+			}
+			for _, pod := range pods.Items {
+				if pod.Spec.NodeName == "" {
+					framework.Logf("Diagnostics pod %s is not scheduled to any node", pod.Name)
+					return false
+				}
+				node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", pod.Spec.NodeName, err)
+					return false
+				}
+				if _, ok := node.Labels["node-role.kubernetes.io/master"]; !ok {
+					framework.Logf("Diagnostics pod %s is not scheduled to a master node", pod.Name)
+					return false
+				}
+			}
+			cfg, err := oc.AdminConfigClient().ConfigV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster config: %v", err)
+				return false
+			}
+			return meta.IsStatusConditionTrue(cfg.Status.Conditions, condition)
+		}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	})
+
+	g.It("Should move the target diagnostics pods based on the new selector and tolerations", func(ctx context.Context) {
+		netConfigApply := applyconfigv1.Network(clusterConfig).WithSpec(
+			applyconfigv1.NetworkSpec().WithNetworkDiagnostics(
+				applyconfigv1.NetworkDiagnostics().
+					WithMode(configv1.NetworkDiagnosticsAll).
+					WithTargetPlacement(
+						applyconfigv1.NetworkDiagnosticsTargetPlacement().
+							WithNodeSelector(map[string]string{"node-role.kubernetes.io/master": ""}).
+							WithTolerations(corev1.Toleration{
+								Operator: corev1.TolerationOpExists,
+								Key:      "node-role.kubernetes.io/master",
+								Effect:   corev1.TaintEffectNoSchedule,
+							}),
+					),
+			),
+		)
+		_, err := oc.AdminConfigClient().ConfigV1().Networks().Apply(ctx, netConfigApply,
+			metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Eventually(func() bool {
+			pods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+				LabelSelector: "app=network-check-target"})
+			if err != nil {
+				framework.Logf("Error getting pods in %s namespace: %v", namespace, err)
+				return false
+			}
+			if len(pods.Items) == 0 {
+				framework.Logf("No diagnostics pods found")
+				return false
+			}
+			for _, pod := range pods.Items {
+				if pod.Spec.NodeName == "" {
+					framework.Logf("Diagnostics pod %s is not scheduled to any node", pod.Name)
+					return false
+				}
+				node, err := oc.AdminKubeClient().CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
+				if err != nil {
+					framework.Logf("Error getting node %s: %v", pod.Spec.NodeName, err)
+					return false
+				}
+				if _, ok := node.Labels["node-role.kubernetes.io/master"]; !ok {
+					framework.Logf("Diagnostics pod %s is not scheduled to a master node", pod.Name)
+					return false
+				}
+			}
+			cfg, err := oc.AdminConfigClient().ConfigV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster config: %v", err)
+				return false
+			}
+			return meta.IsStatusConditionTrue(cfg.Status.Conditions, condition)
+		}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	})
+
+	g.It("Should function without any target pods", func(ctx context.Context) {
+		netConfigApply := applyconfigv1.Network(clusterConfig).WithSpec(
+			applyconfigv1.NetworkSpec().WithNetworkDiagnostics(
+				applyconfigv1.NetworkDiagnostics().
+					WithMode(configv1.NetworkDiagnosticsAll).
+					WithTargetPlacement(
+						applyconfigv1.NetworkDiagnosticsTargetPlacement().
+							WithNodeSelector(map[string]string{"alien": ""}),
+					),
+			),
+		)
+		_, err := oc.AdminConfigClient().ConfigV1().Networks().Apply(ctx, netConfigApply,
+			metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Eventually(func() bool {
+			pods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+				LabelSelector: "app=network-check-target"})
+			if err != nil {
+				framework.Logf("Error getting pods in %s namespace: %v", namespace, err)
+				return false
+			}
+			if len(pods.Items) != 0 {
+				framework.Logf("Target diagnostics pods found")
+				return false
+			}
+			cfg, err := oc.AdminConfigClient().ConfigV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster config: %v", err)
+				return false
+			}
+			return meta.IsStatusConditionTrue(cfg.Status.Conditions, condition)
+		}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	})
+
+	g.It("Should set the condition to false if there are no nodes able to host the source pods", func(ctx context.Context) {
+		netConfigApply := applyconfigv1.Network(clusterConfig).WithSpec(
+			applyconfigv1.NetworkSpec().WithNetworkDiagnostics(
+				applyconfigv1.NetworkDiagnostics().
+					WithMode(configv1.NetworkDiagnosticsAll).
+					WithSourcePlacement(
+						applyconfigv1.NetworkDiagnosticsSourcePlacement().
+							WithNodeSelector(map[string]string{"alien": ""}),
+					),
+			),
+		)
+		_, err := oc.AdminConfigClient().ConfigV1().Networks().Apply(ctx, netConfigApply,
+			metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		o.Eventually(func() bool {
+			// Should not affect the Progressing condition of network.operator
+			oper, err := oc.AdminOperatorClient().OperatorV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster operator: %v", err)
+				return false
+			}
+			for _, operCondition := range oper.Status.Conditions {
+				if operCondition.Type == "Progressing" && operCondition.Status != v1.ConditionFalse {
+					framework.Logf("Invalid progressing condition: %v", operCondition)
+					return false
+				}
+			}
+
+			cfg, err := oc.AdminConfigClient().ConfigV1().Networks().Get(ctx, clusterConfig, metav1.GetOptions{})
+			if err != nil {
+				framework.Logf("Error getting cluster config: %v", err)
+				return false
+			}
+			return meta.IsStatusConditionFalse(cfg.Status.Conditions, condition)
+		}, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
+	})
+
+})
diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go
index d473ef38bd21..dfa6f2922dd2 100644
--- a/test/extended/util/annotate/generated/zz_generated.annotations.go
+++ b/test/extended/util/annotate/generated/zz_generated.annotations.go
@@ -1423,6 +1423,18 @@ var Annotations = map[string]string{
 
 	"[sig-network][Feature:vlan] should create pingable pods with vlan interface on an in-container master [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]",
 
+	"[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should be enabled by default": " [Suite:openshift/conformance/parallel]",
+
+	"[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should function without any target pods": " [Suite:openshift/conformance/parallel]",
+
+	"[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should move the source diagnostics pods based on the new selector and tolerations": " [Suite:openshift/conformance/parallel]",
+
+	"[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should move the target diagnostics pods based on the new selector and tolerations": " [Suite:openshift/conformance/parallel]",
+
+	"[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should remove all network diagnostics pods when disabled": " [Suite:openshift/conformance/parallel]",
+
+	"[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should set the condition to false if there are no nodes able to host the source pods": " [Suite:openshift/conformance/parallel]",
+
 	"[sig-network][endpoints] admission [apigroup:config.openshift.io] blocks manual creation of EndpointSlices pointing to the cluster or service network": " [Suite:openshift/conformance/parallel]",
 
 	"[sig-network][endpoints] admission [apigroup:config.openshift.io] blocks manual creation of Endpoints pointing to the cluster or service network": " [Suite:openshift/conformance/parallel]",
diff --git a/zz_generated.manifests/test-reporting.yaml b/zz_generated.manifests/test-reporting.yaml
index ca71df89a37a..3d43d9d1ea66 100644
--- a/zz_generated.manifests/test-reporting.yaml
+++ b/zz_generated.manifests/test-reporting.yaml
@@ -9,6 +9,20 @@ spec:
     tests:
     - testName: '[sig-arch][OCPFeatureGate:Example] should only run FeatureGated test
         when enabled'
+  - featureGate: NetworkDiagnosticsConfig
+    tests:
+    - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should be
+        enabled by default'
+    - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should function
+        without any target pods'
+    - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should move
+        the source diagnostics pods based on the new selector and tolerations'
+    - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should move
+        the target diagnostics pods based on the new selector and tolerations'
+    - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should remove
+        all network diagnostics pods when disabled'
+    - testName: '[sig-network][OCPFeatureGate:NetworkDiagnosticsConfig] Should set
+        the condition to false if there are no nodes able to host the source pods'
   - featureGate: ValidatingAdmissionPolicy
     tests:
     - testName: '[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin]