diff --git a/tests/utils.go b/tests/utils.go
index 3f7bd64a1748..85653be8a4c2 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -592,6 +592,11 @@ func CreateHostPathPvWithSize(osName string, hostPath string, size string) {
     hostPathType := k8sv1.HostPathDirectoryOrCreate
 
     name := fmt.Sprintf("%s-disk-for-tests", osName)
+
+    nodes := GetAllSchedulableNodes(virtCli)
+    Expect(len(nodes.Items) > 0).To(BeTrue())
+    nodeName := nodes.Items[0].Name
+
     pv := &k8sv1.PersistentVolume{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
@@ -620,7 +625,7 @@ func CreateHostPathPvWithSize(osName string, hostPath string, size string) {
                     {
                         Key:      "kubernetes.io/hostname",
                         Operator: k8sv1.NodeSelectorOpIn,
-                        Values:   []string{"node01"},
+                        Values:   []string{nodeName},
                     },
                 },
             },
@@ -1550,6 +1555,12 @@ func newBlockVolumePV(name string, labelSelector map[string]string, size string)
 
     storageClass := StorageClassBlockVolume
     volumeMode := k8sv1.PersistentVolumeBlock
+    virtCli, err := kubecli.GetKubevirtClient()
+    PanicOnError(err)
+
+    nodes := GetAllSchedulableNodes(virtCli)
+    Expect(len(nodes.Items) > 0).To(BeTrue())
+    nodeName := nodes.Items[0].Name
     // Note: the path depends on kubevirtci!
     // It's configured to have a device backed by a cirros image at exactly that place on node01
     // And the local storage provider also has access to it
@@ -1578,7 +1589,7 @@ func newBlockVolumePV(name string, labelSelector map[string]string, size string)
                     {
                         Key:      "kubernetes.io/hostname",
                         Operator: k8sv1.NodeSelectorOpIn,
-                        Values:   []string{"node01"},
+                        Values:   []string{nodeName},
                     },
                 },
             },
diff --git a/tests/vmi_configuration_test.go b/tests/vmi_configuration_test.go
index 474b1e3b61b9..347a72ae67cd 100644
--- a/tests/vmi_configuration_test.go
+++ b/tests/vmi_configuration_test.go
@@ -1121,6 +1121,24 @@ var _ = Describe("Configurations", func() {
     })
     Describe("[rfe_id:897][crit:medium][vendor:cnv-qe@redhat.com][level:component]VirtualMachineInstance with CPU pinning", func() {
         var nodes *kubev1.NodeList
+
+        isNodeHasCPUManagerLabel := func(nodeName string) bool {
+            Expect(nodeName).ToNot(BeEmpty())
+
+            nodeObject, err := virtClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+            Expect(err).ToNot(HaveOccurred())
+            nodeHaveCpuManagerLabel := false
+            nodeLabels := nodeObject.GetLabels()
+
+            for label, val := range nodeLabels {
+                if label == v1.CPUManager && val == "true" {
+                    nodeHaveCpuManagerLabel = true
+                    break
+                }
+            }
+            return nodeHaveCpuManagerLabel
+        }
+
         BeforeEach(func() {
             nodes, err = virtClient.CoreV1().Nodes().List(metav1.ListOptions{})
             tests.PanicOnError(err)
@@ -1128,6 +1146,7 @@ var _ = Describe("Configurations", func() {
             if len(nodes.Items) == 1 {
                 Skip("Skip cpu pinning test that requires multiple nodes when only one node is present.")
             }
         })
+
         Context("with cpu pinning enabled", func() {
             It("[test_id:1684]should set the cpumanager label to false when it's not running", func() {
@@ -1170,11 +1189,13 @@ var _ = Describe("Configurations", func() {
                         kubev1.ResourceMemory: resource.MustParse("64M"),
                     },
                 }
+
                 By("Starting a VirtualMachineInstance")
                 _, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(cpuVmi)
                 Expect(err).ToNot(HaveOccurred())
                 node := tests.WaitForSuccessfulVMIStart(cpuVmi)
-                Expect(node).NotTo(ContainSubstring("node01"))
+
+                Expect(isNodeHasCPUManagerLabel(node)).To(BeTrue())
 
                 By("Checking that the pod QOS is guaranteed")
                 readyPod := tests.GetRunningPodByVirtualMachineInstance(cpuVmi, tests.NamespaceTestDefault)
@@ -1234,7 +1255,7 @@ var _ = Describe("Configurations", func() {
                 _, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(cpuVmi)
                 Expect(err).ToNot(HaveOccurred())
                 node := tests.WaitForSuccessfulVMIStart(cpuVmi)
-                Expect(node).NotTo(ContainSubstring("node01"))
+                Expect(isNodeHasCPUManagerLabel(node)).To(BeTrue())
 
                 By("Expecting the VirtualMachineInstance console")
                 expecter, err := tests.LoggedInCirrosExpecter(cpuVmi)
@@ -1322,13 +1343,13 @@ var _ = Describe("Configurations", func() {
                 _, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(cpuVmi)
                 Expect(err).ToNot(HaveOccurred())
                 node := tests.WaitForSuccessfulVMIStart(cpuVmi)
-                Expect(node).To(ContainSubstring("node02"))
+                Expect(isNodeHasCPUManagerLabel(node)).To(BeTrue())
 
                 By("Starting a VirtualMachineInstance without dedicated cpus")
                 _, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(Vmi)
                 Expect(err).ToNot(HaveOccurred())
-                node1 := tests.WaitForSuccessfulVMIStart(Vmi)
-                Expect(node1).To(ContainSubstring("node02"))
+                node = tests.WaitForSuccessfulVMIStart(Vmi)
+                Expect(isNodeHasCPUManagerLabel(node)).To(BeTrue())
             })
         })
     })
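
For reference, the helper added in vmi_configuration_test.go reduces to a single label lookup. Below is a minimal standalone sketch of the same check, assuming a plain client-go clientset; the function name is illustrative, and the "cpumanager" key is an assumption standing in for the v1.CPUManager constant the tests use:

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// nodeHasCPUManagerLabel reports whether the named node carries the CPU
// manager label with the value "true". A direct map lookup replaces the
// range loop from the test helper; behavior is identical, since label
// keys are unique within a node's label map.
func nodeHasCPUManagerLabel(client kubernetes.Interface, nodeName string) (bool, error) {
    // Same Get signature as the client-go vintage used in the diff.
    node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
    if err != nil {
        return false, err
    }
    val, ok := node.GetLabels()["cpumanager"] // assumed value of v1.CPUManager
    return ok && val == "true", nil
}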