diff --git a/hack/dockerized b/hack/dockerized
index d2d5c1c49224..6352cfaa68b4 100755
--- a/hack/dockerized
+++ b/hack/dockerized
@@ -44,14 +44,14 @@ _rsync() {
 rm ${KUBEVIRT_DIR}/.glide*.hash -f
 
 # Copy kubevirt into the persistent docker volume
-_rsync --delete --exclude 'cluster/vagrant*' --exclude "_out" --exclude ".vagrant" ${KUBEVIRT_DIR}/ "rsync://root@127.0.0.1:${RSYNCD_PORT}/build"
+_rsync --delete --exclude 'cluster/**/.kubectl' --exclude 'cluster/**/.oc' --exclude 'cluster/**/.kubeconfig' --exclude "_out" --exclude ".vagrant" ${KUBEVIRT_DIR}/ "rsync://root@127.0.0.1:${RSYNCD_PORT}/build"
 
 # Run the command
 test -t 1 && USE_TTY="-it"
 docker run --rm -v "${BUILDER}:/root:rw,z" ${USE_TTY} -w "/root/go/src/kubevirt.io/kubevirt" ${BUILDER} "$@"
 
 # Copy the whole kubevirt data out to get generated sources and formatting changes
-_rsync --exclude '.glide*' --exclude "_out" --exclude "vendor" --exclude ".vagrant" --exclude ".git" "rsync://root@127.0.0.1:${RSYNCD_PORT}/build" ${KUBEVIRT_DIR}/
+_rsync --exclude '.glide*' --exclude 'cluster/**/.kubectl' --exclude 'cluster/**/.oc' --exclude 'cluster/**/.kubeconfig' --exclude "_out" --exclude "vendor" --exclude ".vagrant" --exclude ".git" "rsync://root@127.0.0.1:${RSYNCD_PORT}/build" ${KUBEVIRT_DIR}/
 if [ "$SYNC_VENDOR" = "true" ]; then
     _rsync --delete "rsync://root@127.0.0.1:${RSYNCD_PORT}/vendor" ${VENDOR_DIR}
 fi
diff --git a/hack/functests.sh b/hack/functests.sh
index 052382fcffb3..1a0d891286e1 100755
--- a/hack/functests.sh
+++ b/hack/functests.sh
@@ -22,4 +22,4 @@ set -e
 source hack/common.sh
 source hack/config.sh
 
-${TESTS_OUT_DIR}/tests.test -kubeconfig=${kubeconfig} -test.timeout 40m ${FUNC_TEST_ARGS}
+${TESTS_OUT_DIR}/tests.test -kubeconfig=${kubeconfig} -tag=${docker_tag} -prefix=${docker_prefix} -test.timeout 40m ${FUNC_TEST_ARGS}
diff --git a/tests/console_test.go b/tests/console_test.go
index 8e49a87954b2..bb401a36e212 100644
--- a/tests/console_test.go
+++ b/tests/console_test.go
@@ -66,7 +66,7 @@ var _ = Describe("Console", func() {
 	Context("with a cirros image", func() {
 		It("should return that we are running cirros", func() {
 			RunVMAndExpectConsoleOutput(
-				"kubevirt/cirros-registry-disk-demo:devel",
+				tests.RegistryDiskFor(tests.RegistryDiskCirros),
 				"checking http://169.254.169.254/2009-04-04/instance-id",
 			)
 		}, 140)
@@ -75,14 +75,14 @@
 	Context("with a fedora image", func() {
 		It("should return that we are running fedora", func() {
 			RunVMAndExpectConsoleOutput(
-				"kubevirt/fedora-cloud-registry-disk-demo:devel",
+				tests.RegistryDiskFor(tests.RegistryDiskFedora),
 				"Welcome to",
 			)
 		}, 140)
 	})
 
 	It("should be able to reconnect to console multiple times", func() {
-		vm := tests.NewRandomVMWithEphemeralDisk("kubevirt/alpine-registry-disk-demo:devel")
+		vm := tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskAlpine))
 
 		By("Creating a new VM")
 		Expect(virtClient.RestClient().Post().Resource("virtualmachines").Namespace(tests.NamespaceTestDefault).Body(vm).Do().Error()).To(Succeed())
diff --git a/tests/registry_disk_test.go b/tests/registry_disk_test.go
index 0d1410913ca1..6114cf5ef13a 100644
--- a/tests/registry_disk_test.go
+++ b/tests/registry_disk_test.go
@@ -91,7 +91,7 @@ var _ = Describe("RegistryDisk", func() {
 	Describe("Starting and stopping the same VM", func() {
 		Context("with ephemeral registry disk", func() {
 			It("should success multiple times", func(done Done) {
-				vm := tests.NewRandomVMWithEphemeralDisk("kubevirt/cirros-registry-disk-demo:devel")
+				vm := tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskCirros))
 				num := 2
 				for i := 0; i < num; i++ {
 					By("Starting the VM")
@@ -112,7 +112,7 @@ var _ = Describe("RegistryDisk", func() {
 	Describe("Starting a VM", func() {
 		Context("with ephemeral registry disk", func() {
 			It("should not modify the spec on status update", func() {
-				vm := tests.NewRandomVMWithEphemeralDisk("kubevirt/cirros-registry-disk-demo:devel")
+				vm := tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskCirros))
 				v1.SetObjectDefaults_VirtualMachine(vm)
 
 				By("Starting the VM")
@@ -134,7 +134,7 @@ var _ = Describe("RegistryDisk", func() {
 			vms := make([]*v1.VirtualMachine, 0, num)
 			objs := make([]runtime.Object, 0, num)
 			for i := 0; i < num; i++ {
-				vm := tests.NewRandomVMWithEphemeralDisk("kubevirt/cirros-registry-disk-demo:devel")
+				vm := tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskCirros))
 				// FIXME if we give too much ram, the vms really boot and eat all our memory (cache?)
 				vm.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("1M")
 				obj := LaunchVM(vm)
diff --git a/tests/replicaset_test.go b/tests/replicaset_test.go
index 549ab65d5b4e..ee2d6103ff4a 100644
--- a/tests/replicaset_test.go
+++ b/tests/replicaset_test.go
@@ -81,7 +81,7 @@ var _ = Describe("VirtualMachineReplicaSet", func() {
 
 	newReplicaSet := func() *v1.VirtualMachineReplicaSet {
 		By("Create a new VM replica set")
-		template := tests.NewRandomVMWithEphemeralDisk("kubevirt/cirros-registry-disk-demo:devel")
+		template := tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskCirros))
 		newRS := tests.NewRandomReplicaSetFromVM(template, int32(0))
 		newRS, err = virtClient.ReplicaSet(tests.NamespaceTestDefault).Create(newRS)
 		Expect(err).ToNot(HaveOccurred())
diff --git a/tests/utils.go b/tests/utils.go
index dbfcde90c8e7..e12075ed6e64 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -42,11 +42,21 @@ import (
 
 	"github.com/google/goexpect"
 
+	"flag"
+
 	"kubevirt.io/kubevirt/pkg/api/v1"
 	"kubevirt.io/kubevirt/pkg/kubecli"
 	"kubevirt.io/kubevirt/pkg/log"
 )
 
+var KubeVirtVersionTag string = "latest"
+var KubeVirtRepoPrefix string = "kubevirt"
+
+func init() {
+	flag.StringVar(&KubeVirtVersionTag, "tag", "latest", "Set the image tag or digest to use")
+	flag.StringVar(&KubeVirtRepoPrefix, "prefix", "kubevirt", "Set the repository prefix for all images")
+}
+
 type EventType string
 
 const (
@@ -87,22 +97,11 @@ const (
 	DiskAlpineISCSI = "disk-alpine-iscsi"
 )
 
-const (
-	labelISCSIPod         = "iscsi-demo-target"
-	labelISCSIWithAuthPod = "iscsi-auth-demo-target"
-	labelNFSPod           = "nfs-server-demo"
-)
-
 const (
 	iscsiIqn        = "iqn.2017-01.io.kubevirt:sn.42"
 	iscsiSecretName = "iscsi-demo-secret"
 )
 
-const (
-	nfsPathAlpine = "/nfsshare/alpine"
-	nfsPathCirros = "/nfsshare/cirros"
-)
-
 type ProcessFunc func(event *k8sv1.Event) (done bool)
 
 type ObjectEventWatcher struct {
@@ -383,49 +382,6 @@ func newPvISCSI(os string, targetIp string, lun int32, withAuth bool) *k8sv1.Per
 	return pv
 }
 
-func createPvNFS(os string, path string) {
-	virtCli, err := kubecli.GetKubevirtClient()
-	PanicOnError(err)
-
-	nfsServer := getPodIpByLabel(labelNFSPod)
-
-	_, err = virtCli.CoreV1().PersistentVolumes().Create(newPvNFS(os, nfsServer, path))
-	if !errors.IsAlreadyExists(err) {
-		PanicOnError(err)
-	}
-}
-
-func newPvNFS(os string, nfsServer string, path string) *k8sv1.PersistentVolume {
-	quantity, err := resource.ParseQuantity("1Gi")
-	PanicOnError(err)
-
-	name := fmt.Sprintf("%s-disk-for-tests", os)
-	label := os
-
-	pv := &k8sv1.PersistentVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-			Labels: map[string]string{
-				"kubevirt.io/test": label,
-			},
-		},
-		Spec: k8sv1.PersistentVolumeSpec{
-			AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
-			Capacity: k8sv1.ResourceList{
-				"storage": quantity,
-			},
-			PersistentVolumeReclaimPolicy: k8sv1.PersistentVolumeReclaimRetain,
-			PersistentVolumeSource: k8sv1.PersistentVolumeSource{
-				NFS: &k8sv1.NFSVolumeSource{
-					Server: nfsServer,
-					Path:   path,
-				},
-			},
-		},
-	}
-	return pv
-}
-
 func deletePVC(os string, withAuth bool) {
 	virtCli, err := kubecli.GetKubevirtClient()
 	PanicOnError(err)
@@ -700,7 +656,7 @@ func NewRandomVMWithPVC(claimName string) *v1.VirtualMachine {
 }
 
 func NewRandomVMWithWatchdog() *v1.VirtualMachine {
-	vm := NewRandomVMWithEphemeralDisk("kubevirt/alpine-registry-disk-demo:devel")
+	vm := NewRandomVMWithEphemeralDisk(RegistryDiskFor(RegistryDiskAlpine))
 
 	vm.Spec.Domain.Devices.Watchdog = &v1.Watchdog{
 		Name: "mywatchdog",
@@ -788,7 +744,7 @@ func NewBool(x bool) *bool {
 	return &x
 }
 
-func RenderJob(name string, dockerTag string, cmd []string, args []string) *k8sv1.Pod {
+func RenderJob(name string, cmd []string, args []string) *k8sv1.Pod {
 	job := k8sv1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: name,
@@ -801,7 +757,7 @@
 			Containers: []k8sv1.Container{
 				{
 					Name:  name,
-					Image: "kubevirt/vm-killer:" + dockerTag,
+					Image: fmt.Sprintf("%s/vm-killer:%s", KubeVirtRepoPrefix, KubeVirtVersionTag),
 					Command: cmd,
 					Args:    args,
 					SecurityContext: &k8sv1.SecurityContext{
@@ -843,3 +799,22 @@
 		Check: func() bool { return true },
 	}, timeout, opts...)
 }
+
+type RegistryDisk string
+
+const (
+	RegistryDiskCirros RegistryDisk = "cirros"
+	RegistryDiskAlpine RegistryDisk = "alpine"
+	RegistryDiskFedora RegistryDisk = "fedora-cloud"
+)
+
+// RegistryDiskFor takes the name of an image and returns the full
+// registry disk image path.
+// Supported values are: cirros, fedora, alpine
+func RegistryDiskFor(name RegistryDisk) string {
+	switch name {
+	case RegistryDiskCirros, RegistryDiskAlpine, RegistryDiskFedora:
+		return fmt.Sprintf("%s/%s-registry-disk-demo:%s", KubeVirtRepoPrefix, name, KubeVirtVersionTag)
+	}
+	panic(fmt.Sprintf("Unsupported registry disk %s", name))
+}
diff --git a/tests/vm_configuration_test.go b/tests/vm_configuration_test.go
index 70d403d9626c..96d22b8ba682 100644
--- a/tests/vm_configuration_test.go
+++ b/tests/vm_configuration_test.go
@@ -48,7 +48,7 @@ var _ = Describe("Configurations", func() {
 		var vm *v1.VirtualMachine
 
 		BeforeEach(func() {
-			vm = tests.NewRandomVMWithEphemeralDisk("kubevirt/alpine-registry-disk-demo:devel")
+			vm = tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskAlpine))
 		})
 		It("should report 3 cpu cores under guest OS", func() {
 			vm.Spec.Domain.CPU = &v1.CPU{
@@ -88,7 +88,7 @@
 		BeforeEach(func() {
 			// ordering:
 			// use a small disk for the other ones
-			containerImage := "kubevirt/cirros-registry-disk-demo:devel"
+			containerImage := tests.RegistryDiskFor(tests.RegistryDiskCirros)
 			// virtio - added by NewRandomVMWithEphemeralDisk
 			vm = tests.NewRandomVMWithEphemeralDiskAndUserdata(containerImage, "echo hi!\n")
 			// sata
diff --git a/tests/vm_networking_test.go b/tests/vm_networking_test.go
index be1f63fd00b1..027e352b456b 100644
--- a/tests/vm_networking_test.go
+++ b/tests/vm_networking_test.go
@@ -21,7 +21,6 @@ package tests_test
 
 import (
 	"flag"
-	"os"
 	"time"
 
 	. "github.com/onsi/ginkgo"
@@ -44,10 +43,6 @@ import (
 )
 
 var _ = Describe("Networking", func() {
-	dockerTag := os.Getenv("docker_tag")
-	if dockerTag == "" {
-		dockerTag = "latest"
-	}
 
 	flag.Parse()
 
@@ -64,7 +59,7 @@ var _ = Describe("Networking", func() {
 	var wg sync.WaitGroup
 
 	createAndLogin := func() (vm *v1.VirtualMachine) {
-		vm = tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", "#!/bin/bash\necho 'hello'\n")
+		vm = tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), "#!/bin/bash\necho 'hello'\n")
 
 		// Start VM
 		vm, err = virtClient.VM(tests.NamespaceTestDefault).Create(vm)
@@ -153,7 +148,7 @@
 
 			// Run netcat and give it one second to ghet "Hello World!" back from the VM
 			check := []string{fmt.Sprintf("while read x; do test \"$x\" = \"Hello World!\"; exit $?; done < <(nc %s 1500 -i 1 -w 1)", ip)}
-			job := tests.RenderJob("netcat", dockerTag, []string{"/bin/bash", "-c"}, check)
+			job := tests.RenderJob("netcat", []string{"/bin/bash", "-c"}, check)
 			job.Spec.Affinity = &v12.Affinity{
 				NodeAffinity: &v12.NodeAffinity{
 					RequiredDuringSchedulingIgnoredDuringExecution: &v12.NodeSelector{
diff --git a/tests/vm_userdata_test.go b/tests/vm_userdata_test.go
index 1664a0084c6a..3c409cfedf23 100644
--- a/tests/vm_userdata_test.go
+++ b/tests/vm_userdata_test.go
@@ -78,7 +78,7 @@ var _ = Describe("CloudInit UserData", func() {
 
 			magicStr := "printed from cloud-init userdata"
 			userData := fmt.Sprintf("#!/bin/sh\n\necho '%s'\n", magicStr)
-			vm := tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", userData)
+			vm := tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), userData)
 			obj := LaunchVM(vm)
 			VerifyUserDataVM(vm, obj, magicStr)
 			close(done)
@@ -88,7 +88,7 @@
 		It("should take user-data from k8s secret", func(done Done) {
 			magicStr := "printed from cloud-init userdata"
 			userData := fmt.Sprintf("#!/bin/sh\n\necho '%s'\n", magicStr)
-			vm := tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", userData)
+			vm := tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), userData)
 
 			for _, volume := range vm.Spec.Volumes {
 				if volume.CloudInitNoCloud == nil {
diff --git a/tests/vmlifecycle_test.go b/tests/vmlifecycle_test.go
index 0ac6043fda9f..4c6192a89136 100644
--- a/tests/vmlifecycle_test.go
+++ b/tests/vmlifecycle_test.go
@@ -40,11 +40,6 @@ import (
 
 var _ = Describe("Vmlifecycle", func() {
 
-	dockerTag := os.Getenv("docker_tag")
-	if dockerTag == "" {
-		dockerTag = "latest"
-	}
-
 	flag.Parse()
 
 	virtClient, err := kubecli.GetKubevirtClient()
@@ -54,7 +49,7 @@
 
 	BeforeEach(func() {
 		tests.BeforeTestCleanup()
-		vm = tests.NewRandomVMWithEphemeralDisk("kubevirt/alpine-registry-disk-demo:devel")
+		vm = tests.NewRandomVMWithEphemeralDisk(tests.RegistryDiskFor(tests.RegistryDiskAlpine))
 	})
 
 	Describe("Creating a VM", func() {
@@ -105,7 +100,7 @@
 		Context("without k8s secret", func() {
 			It("should retry starting the VM", func(done Done) {
 				userData := fmt.Sprintf("#!/bin/sh\n\necho 'hi'\n")
-				vm = tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", userData)
+				vm = tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), userData)
 
 				for _, volume := range vm.Spec.Volumes {
 					if volume.CloudInitNoCloud != nil {
@@ -137,7 +132,7 @@
 			It("should log warning and proceed once the secret is there", func(done Done) {
 				userData := fmt.Sprintf("#!/bin/sh\n\necho 'hi'\n")
 				userData64 := ""
-				vm = tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", userData)
+				vm = tests.NewRandomVMWithEphemeralDiskAndUserdata(tests.RegistryDiskFor(tests.RegistryDiskCirros), userData)
 
 				for _, volume := range vm.Spec.Volumes {
 					if volume.CloudInitNoCloud != nil {
@@ -195,7 +190,7 @@
 
 			time.Sleep(10 * time.Second)
 			By("Crashing the virt-launcher")
-			err = pkillAllLaunchers(virtClient, nodeName, dockerTag)
+			err = pkillAllLaunchers(virtClient, nodeName)
 			Expect(err).To(BeNil())
 
 			tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().WaitFor(tests.WarningEvent, v1.Stopped)
@@ -226,13 +221,13 @@
 			// Kill virt-handler on the node the VM is active on.
 			time.Sleep(5 * time.Second)
 			By("Crashing the virt-handler")
-			err = pkillAllHandlers(virtClient, nodeName, dockerTag)
+			err = pkillAllHandlers(virtClient, nodeName)
 			Expect(err).To(BeNil())
 
 			// Crash the VM and verify a recovered version of virt-handler processes the crash
 			time.Sleep(5 * time.Second)
 			By("Killing the VM")
-			err = pkillAllVms(virtClient, nodeName, dockerTag)
+			err = pkillAllVms(virtClient, nodeName)
 			Expect(err).To(BeNil())
 
 			tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().WaitFor(tests.WarningEvent, v1.Stopped)
@@ -391,7 +386,7 @@
 
 			By("Killing the VM")
 			time.Sleep(10 * time.Second)
-			err = pkillAllVms(virtClient, nodeName, dockerTag)
+			err = pkillAllVms(virtClient, nodeName)
 			Expect(err).To(BeNil())
 
 			tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().WaitFor(tests.WarningEvent, v1.Stopped)
@@ -418,7 +413,7 @@
 			Expect(err).ToNot(HaveOccurred())
 
 			By("Killing the VM")
-			err = pkillAllVms(virtClient, nodeName, dockerTag)
+			err = pkillAllVms(virtClient, nodeName)
 			Expect(err).To(BeNil())
 
 			// Wait for stop event of the VM
@@ -435,39 +430,8 @@
 		})
 	})
 
-func renderPkillAllJob(dockerTag string, processName string) *k8sv1.Pod {
-	job := k8sv1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: "vm-killer",
-			Labels: map[string]string{
-				v1.AppLabel: "test",
-			},
-		},
-		Spec: k8sv1.PodSpec{
-			RestartPolicy: k8sv1.RestartPolicyNever,
-			Containers: []k8sv1.Container{
-				{
-					Name:  "vm-killer",
-					Image: "kubevirt/vm-killer:" + dockerTag,
-					Command: []string{
-						"pkill",
-						"-9",
-						processName,
-					},
-					SecurityContext: &k8sv1.SecurityContext{
-						Privileged: newBool(true),
-						RunAsUser:  new(int64),
-					},
-				},
-			},
-			HostPID: true,
-			SecurityContext: &k8sv1.PodSecurityContext{
-				RunAsUser: new(int64),
-			},
-		},
-	}
-
-	return &job
+func renderPkillAllJob(processName string) *k8sv1.Pod {
+	return tests.RenderJob("vm-killer", []string{"pkill"}, []string{"-9", processName})
 }
 
 func getVirtLauncherLogs(virtCli kubecli.KubevirtClient, vm *v1.VirtualMachine) string {
@@ -501,8 +465,8 @@
 	return string(logsRaw)
 }
 
-func pkillAllHandlers(virtCli kubecli.KubevirtClient, node, dockerTag string) error {
-	job := renderPkillAllJob(dockerTag, "virt-handler")
+func pkillAllHandlers(virtCli kubecli.KubevirtClient, node string) error {
+	job := renderPkillAllJob("virt-handler")
 	job.Spec.NodeName = node
 	pod, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job)
 	Expect(err).ToNot(HaveOccurred())
@@ -518,22 +482,18 @@
 	return err
 }
 
-func pkillAllLaunchers(virtCli kubecli.KubevirtClient, node, dockerTag string) error {
-	job := renderPkillAllJob(dockerTag, "virt-launcher")
+func pkillAllLaunchers(virtCli kubecli.KubevirtClient, node string) error {
+	job := renderPkillAllJob("virt-launcher")
 	job.Spec.NodeName = node
 
 	_, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job)
 	return err
 }
 
-func pkillAllVms(virtCli kubecli.KubevirtClient, node, dockerTag string) error {
-	job := renderPkillAllJob(dockerTag, "qemu")
+func pkillAllVms(virtCli kubecli.KubevirtClient, node string) error {
+	job := renderPkillAllJob("qemu")
 	job.Spec.NodeName = node
 
 	_, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job)
 	return err
 }
-
-func newBool(x bool) *bool {
-	return &x
-}
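For reference, a minimal usage sketch of the helpers this patch introduces in tests/utils.go (the surrounding function is illustrative only; RegistryDiskFor, the RegistryDisk constants, NewRandomVMWithEphemeralDisk, and the -tag/-prefix flags are the names added above, wired through hack/functests.sh):

package tests_test

import (
	"flag"

	"kubevirt.io/kubevirt/tests"
)

// Illustrative sketch only: shows how RegistryDiskFor resolves an image
// reference from the -prefix and -tag flags registered in tests/utils.go.
func newCirrosVMExample() {
	// -tag and -prefix are parsed together with the other test flags.
	flag.Parse()

	// With the defaults this resolves to "kubevirt/cirros-registry-disk-demo:latest";
	// with -prefix=myrepo -tag=v0.2.0 it becomes "myrepo/cirros-registry-disk-demo:v0.2.0".
	image := tests.RegistryDiskFor(tests.RegistryDiskCirros)

	// The resolved image is passed to the existing VM factory helpers.
	vm := tests.NewRandomVMWithEphemeralDisk(image)
	_ = vm
}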