From 340b19ca2b1e89a1970b7c14901c67f9b10fd05c Mon Sep 17 00:00:00 2001
From: David Vossel
Date: Thu, 28 Feb 2019 12:27:38 -0500
Subject: [PATCH 01/13] Add custom scc object to install strategy

Signed-off-by: David Vossel
---
 .../install-strategy/strategy.go | 332 +++++++++++++++++-
 1 file changed, 330 insertions(+), 2 deletions(-)

diff --git a/pkg/virt-operator/install-strategy/strategy.go b/pkg/virt-operator/install-strategy/strategy.go
index 96629d402918..000d835cd475 100644
--- a/pkg/virt-operator/install-strategy/strategy.go
+++ b/pkg/virt-operator/install-strategy/strategy.go
@@ -27,6 +27,7 @@ import (
 
 	"github.com/ghodss/yaml"
 
+	secv1 "github.com/openshift/api/security/v1"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -45,6 +46,21 @@ import (
 	marshalutil "kubevirt.io/kubevirt/tools/util"
 )
 
+const customSccPrivilegedAccountsType = "KubevirtCustomSccRule"
+
+type customSccPrivilegedAccounts struct {
+	// this isn't a real k8s object. We use the meta type
+	// because it gives a consistent way to separate k8s
+	// objects from our custom actions
+	metav1.TypeMeta `json:",inline"`
+
+	// this is the target scc we're adding service accounts to
+	TargetScc string `json:"TargetScc"`
+
+	// these are the service accounts being added to the scc
+	ServiceAccounts []string `json:"serviceAccounts"`
+}
+
 type InstallStrategy struct {
 	serviceAccounts []*corev1.ServiceAccount
 
@@ -59,6 +75,8 @@ type InstallStrategy struct {
 	services    []*corev1.Service
 	deployments []*appsv1.Deployment
 	daemonSets  []*appsv1.DaemonSet
+
+	customSccPrivileges []*customSccPrivilegedAccounts
 }
 
 func generateConfigMapName(imageTag string) string {
@@ -157,6 +175,9 @@ func dumpInstallStrategyToBytes(strategy *InstallStrategy) []byte {
 	for _, entry := range strategy.daemonSets {
 		marshalutil.MarshallObject(entry, writer)
 	}
+	for _, entry := range strategy.customSccPrivileges {
+		marshalutil.MarshallObject(entry, writer)
+	}
 	writer.Flush()
 
 	return b.Bytes()
@@ -229,6 +250,20 @@ func GenerateCurrentInstallStrategy(namespace string,
 	}
 	strategy.daemonSets = append(strategy.daemonSets, handler)
 
+	prefix := "system:serviceaccount"
+	typeMeta := metav1.TypeMeta{
+		Kind: customSccPrivilegedAccountsType,
+	}
+	strategy.customSccPrivileges = append(strategy.customSccPrivileges, &customSccPrivilegedAccounts{
+		TypeMeta:  typeMeta,
+		TargetScc: "privileged",
+		ServiceAccounts: []string{
+			fmt.Sprintf("%s:%s:%s", prefix, namespace, "kubevirt-handler"),
+			fmt.Sprintf("%s:%s:%s", prefix, namespace, "kubevirt-apiserver"),
+			fmt.Sprintf("%s:%s:%s", prefix, namespace, "kubevirt-controller"),
+		},
+	})
+
 	return strategy, nil
 }
 
@@ -330,8 +365,12 @@ func loadInstallStrategyFromBytes(data string) (*InstallStrategy, error) {
 				return nil, err
 			}
 			strategy.crds = append(strategy.crds, crd)
-		case "Namespace":
-			// skipped.
We don't do anything with namespaces + case customSccPrivilegedAccountsType: + priv := &customSccPrivilegedAccounts{} + if err := yaml.Unmarshal([]byte(entry), &priv); err != nil { + return nil, err + } + strategy.customSccPrivileges = append(strategy.customSccPrivileges, priv) default: return nil, fmt.Errorf("UNKNOWN TYPE %s detected", obj.Kind) @@ -348,6 +387,261 @@ func addOperatorLabel(objectMeta *metav1.ObjectMeta) { objectMeta.Labels[v1.ManagedByLabel] = v1.ManagedByLabelOperatorValue } +func remove(users []string, user string) ([]string, bool) { + var newUsers []string + modified := false + for _, u := range users { + if u != user { + newUsers = append(newUsers, u) + } else { + modified = true + } + } + return newUsers, modified +} + +func contains(users []string, user string) bool { + for _, u := range users { + if u == user { + return true + } + } + return false +} + +func DeleteAll(kv *v1.KubeVirt, + strategy *InstallStrategy, + stores util.Stores, + clientset kubecli.KubevirtClient, + expectations *util.Expectations) error { + + kvkey, err := controller.KeyFunc(kv) + if err != nil { + return err + } + + gracePeriod := int64(0) + deleteOptions := &metav1.DeleteOptions{ + GracePeriodSeconds: &gracePeriod, + } + + // first delete CRDs only + ext := clientset.ExtensionsClient() + objects := stores.CrdCache.List() + for _, obj := range objects { + if crd, ok := obj.(*extv1beta1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(crd); err == nil { + expectations.Crd.AddExpectedDeletion(kvkey, key) + err := ext.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, deleteOptions) + if err != nil { + expectations.Crd.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete crd %+v: %v", crd, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + + } + if !util.IsStoreEmpty(stores.CrdCache) { + // wait until CRDs are gone + return nil + } + + // delete handler daemonset + obj, exists, err := stores.DaemonSetCache.GetByKey(fmt.Sprintf("%s/%s", kv.Namespace, "virt-handler")) + if err != nil { + log.Log.Errorf("Failed to get virt-handler: %v", err) + return err + } else if exists { + if ds, ok := obj.(*appsv1.DaemonSet); ok && ds.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(ds); err == nil { + expectations.DaemonSet.AddExpectedDeletion(kvkey, key) + err := clientset.AppsV1().DaemonSets(kv.Namespace).Delete("virt-handler", deleteOptions) + if err != nil { + expectations.DaemonSet.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete virt-handler: %v", err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + } + + // delete controller and apiserver deployment + for _, name := range []string{"virt-controller", "virt-api"} { + obj, exists, err := stores.DeploymentCache.GetByKey(fmt.Sprintf("%s/%s", kv.Namespace, name)) + if err != nil { + log.Log.Errorf("Failed to get %v: %v", name, err) + return err + } else if exists { + if depl, ok := obj.(*appsv1.Deployment); ok && depl.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(depl); err == nil { + expectations.Deployment.AddExpectedDeletion(kvkey, key) + err := clientset.AppsV1().Deployments(kv.Namespace).Delete(name, deleteOptions) + if err != nil { + expectations.Deployment.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete virt-handler: %v", err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! 
obj: %+v", obj) + return nil + } + } + } + + // delete services + objects = stores.ServiceCache.List() + for _, obj := range objects { + if svc, ok := obj.(*corev1.Service); ok && svc.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(svc); err == nil { + expectations.Service.AddExpectedDeletion(kvkey, key) + err := clientset.CoreV1().Services(kv.Namespace).Delete(svc.Name, deleteOptions) + if err != nil { + expectations.Service.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete service %+v: %v", svc, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + } + + // delete RBAC + objects = stores.ClusterRoleBindingCache.List() + for _, obj := range objects { + if crb, ok := obj.(*rbacv1.ClusterRoleBinding); ok && crb.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(crb); err == nil { + expectations.ClusterRoleBinding.AddExpectedDeletion(kvkey, key) + err := clientset.RbacV1().ClusterRoleBindings().Delete(crb.Name, deleteOptions) + if err != nil { + expectations.ClusterRoleBinding.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete crb %+v: %v", crb, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + } + + objects = stores.ClusterRoleCache.List() + for _, obj := range objects { + if cr, ok := obj.(*rbacv1.ClusterRole); ok && cr.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(cr); err == nil { + expectations.ClusterRole.AddExpectedDeletion(kvkey, key) + err := clientset.RbacV1().ClusterRoles().Delete(cr.Name, deleteOptions) + if err != nil { + expectations.ClusterRole.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete cr %+v: %v", cr, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + } + + objects = stores.RoleBindingCache.List() + for _, obj := range objects { + if rb, ok := obj.(*rbacv1.RoleBinding); ok && rb.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(rb); err == nil { + expectations.RoleBinding.AddExpectedDeletion(kvkey, key) + err := clientset.RbacV1().RoleBindings(kv.Namespace).Delete(rb.Name, deleteOptions) + if err != nil { + expectations.RoleBinding.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete rb %+v: %v", rb, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + } + + objects = stores.RoleCache.List() + for _, obj := range objects { + if role, ok := obj.(*rbacv1.Role); ok && role.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(role); err == nil { + expectations.Role.AddExpectedDeletion(kvkey, key) + err := clientset.RbacV1().Roles(kv.Namespace).Delete(role.Name, deleteOptions) + if err != nil { + expectations.Role.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete role %+v: %v", role, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! 
obj: %+v", obj) + return nil + } + } + + objects = stores.ServiceAccountCache.List() + for _, obj := range objects { + if sa, ok := obj.(*corev1.ServiceAccount); ok && sa.DeletionTimestamp == nil { + if key, err := controller.KeyFunc(sa); err == nil { + expectations.ServiceAccount.AddExpectedDeletion(kvkey, key) + err := clientset.CoreV1().ServiceAccounts(kv.Namespace).Delete(sa.Name, deleteOptions) + if err != nil { + expectations.ServiceAccount.DeletionObserved(kvkey, key) + log.Log.Errorf("Failed to delete serviceaccount %+v: %v", sa, err) + return err + } + } + } else if !ok { + log.Log.Errorf("Cast failed! obj: %+v", obj) + return nil + } + } + + scc := clientset.SecClient() + for _, sccPriv := range strategy.customSccPrivileges { + privSccObj, exists, err := stores.SCCCache.GetByKey(sccPriv.TargetScc) + if !exists { + return nil + } else if err != nil { + return err + } + + privScc, ok := privSccObj.(*secv1.SecurityContextConstraints) + if !ok { + return fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSccObj) + } + privSccCopy := privScc.DeepCopy() + + modified := false + users := privSccCopy.Users + for _, acc := range sccPriv.ServiceAccounts { + removed := false + users, removed = remove(users, acc) + modified = modified || removed + } + + if modified { + privSccCopy.Users = users + _, err = scc.SecurityContextConstraints().Update(privSccCopy) + if err != nil { + return fmt.Errorf("unable to update scc: %v", err) + } + } + } + + return nil + +} + func CreateAll(kv *v1.KubeVirt, strategy *InstallStrategy, stores util.Stores, @@ -361,6 +655,7 @@ func CreateAll(kv *v1.KubeVirt, core := clientset.CoreV1() rbac := clientset.RbacV1() apps := clientset.AppsV1() + scc := clientset.SecClient() // CRDs for _, crd := range strategy.crds { @@ -515,5 +810,38 @@ func CreateAll(kv *v1.KubeVirt, } } + // Add service accounts to SCC + for _, sccPriv := range strategy.customSccPrivileges { + privSccObj, exists, err := stores.SCCCache.GetByKey(sccPriv.TargetScc) + if !exists { + return objectsAdded, nil + } else if err != nil { + return objectsAdded, err + } + + privScc, ok := privSccObj.(*secv1.SecurityContextConstraints) + if !ok { + return objectsAdded, fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSccObj) + } + privSccCopy := privScc.DeepCopy() + + modified := false + users := privSccCopy.Users + for _, acc := range sccPriv.ServiceAccounts { + if !contains(users, acc) { + users = append(users, acc) + modified = true + } + } + + if modified { + privSccCopy.Users = users + _, err = scc.SecurityContextConstraints().Update(privSccCopy) + if err != nil { + return objectsAdded, fmt.Errorf("unable to update scc: %v", err) + } + } + } + return objectsAdded, nil } From c9076c93330d9e0f4bed477d214eadd9cbaa55f9 Mon Sep 17 00:00:00 2001 From: David Vossel Date: Thu, 28 Feb 2019 12:31:24 -0500 Subject: [PATCH 02/13] Remove old operator create/delete functions in favor of install strategy create/delete Signed-off-by: David Vossel --- pkg/virt-operator/BUILD.bazel | 2 - pkg/virt-operator/creation/BUILD.bazel | 15 -- pkg/virt-operator/creation/all.go | 45 ---- pkg/virt-operator/deletion/BUILD.bazel | 20 -- pkg/virt-operator/deletion/all.go | 240 ------------------ .../install-strategy/BUILD.bazel | 1 + pkg/virt-operator/kubevirt.go | 30 ++- pkg/virt-operator/kubevirt_test.go | 5 +- 8 files changed, 24 insertions(+), 334 deletions(-) delete mode 100644 pkg/virt-operator/creation/all.go delete mode 100644 pkg/virt-operator/deletion/BUILD.bazel delete mode 
100644 pkg/virt-operator/deletion/all.go diff --git a/pkg/virt-operator/BUILD.bazel b/pkg/virt-operator/BUILD.bazel index a2691f083481..76dfa2ec1488 100644 --- a/pkg/virt-operator/BUILD.bazel +++ b/pkg/virt-operator/BUILD.bazel @@ -16,8 +16,6 @@ go_library( "//pkg/log:go_default_library", "//pkg/service:go_default_library", "//pkg/util:go_default_library", - "//pkg/virt-operator/creation:go_default_library", - "//pkg/virt-operator/deletion:go_default_library", "//pkg/virt-operator/install-strategy:go_default_library", "//pkg/virt-operator/util:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", diff --git a/pkg/virt-operator/creation/BUILD.bazel b/pkg/virt-operator/creation/BUILD.bazel index 91b18968d4dd..e69de29bb2d1 100644 --- a/pkg/virt-operator/creation/BUILD.bazel +++ b/pkg/virt-operator/creation/BUILD.bazel @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["all.go"], - importpath = "kubevirt.io/kubevirt/pkg/virt-operator/creation", - visibility = ["//visibility:public"], - deps = [ - "//pkg/api/v1:go_default_library", - "//pkg/kubecli:go_default_library", - "//pkg/log:go_default_library", - "//pkg/virt-operator/install-strategy:go_default_library", - "//pkg/virt-operator/util:go_default_library", - ], -) diff --git a/pkg/virt-operator/creation/all.go b/pkg/virt-operator/creation/all.go deleted file mode 100644 index ca4c0485d7aa..000000000000 --- a/pkg/virt-operator/creation/all.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - * This file is part of the KubeVirt project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2018 Red Hat, Inc. 
- * - */ - -package creation - -import ( - "kubevirt.io/kubevirt/pkg/api/v1" - "kubevirt.io/kubevirt/pkg/kubecli" - "kubevirt.io/kubevirt/pkg/log" - "kubevirt.io/kubevirt/pkg/virt-operator/install-strategy" - "kubevirt.io/kubevirt/pkg/virt-operator/util" -) - -func Create(kv *v1.KubeVirt, stores util.Stores, clientset kubecli.KubevirtClient, expectations *util.Expectations, strategy *installstrategy.InstallStrategy) (int, error) { - - objectsAdded, err := installstrategy.CreateAll(kv, strategy, stores, clientset, expectations) - - if err != nil { - return objectsAdded, err - } - - err = util.UpdateScc(clientset, stores.SCCCache, kv, true) - if err != nil { - return objectsAdded, err - } - - log.Log.Object(kv).Infof("Created %d objects this round", objectsAdded) - return objectsAdded, nil -} diff --git a/pkg/virt-operator/deletion/BUILD.bazel b/pkg/virt-operator/deletion/BUILD.bazel deleted file mode 100644 index 32678d121bb2..000000000000 --- a/pkg/virt-operator/deletion/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["all.go"], - importpath = "kubevirt.io/kubevirt/pkg/virt-operator/deletion", - visibility = ["//visibility:public"], - deps = [ - "//pkg/api/v1:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/kubecli:go_default_library", - "//pkg/log:go_default_library", - "//pkg/virt-operator/util:go_default_library", - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) diff --git a/pkg/virt-operator/deletion/all.go b/pkg/virt-operator/deletion/all.go deleted file mode 100644 index 2a961b1883bf..000000000000 --- a/pkg/virt-operator/deletion/all.go +++ /dev/null @@ -1,240 +0,0 @@ -/* - * This file is part of the KubeVirt project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2018 Red Hat, Inc. 
- * - */ - -package deletion - -import ( - "fmt" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "kubevirt.io/kubevirt/pkg/api/v1" - "kubevirt.io/kubevirt/pkg/controller" - "kubevirt.io/kubevirt/pkg/kubecli" - "kubevirt.io/kubevirt/pkg/log" - "kubevirt.io/kubevirt/pkg/virt-operator/util" -) - -func Delete(kv *v1.KubeVirt, clientset kubecli.KubevirtClient, stores util.Stores, expectations *util.Expectations) error { - - kvkey, err := controller.KeyFunc(kv) - if err != nil { - /// XXX this is not correct, we can't even process this object in the cache, we should do nothing - return err - } - - gracePeriod := int64(0) - deleteOptions := &metav1.DeleteOptions{ - GracePeriodSeconds: &gracePeriod, - } - - // first delete CRDs only - ext := clientset.ExtensionsClient() - objects := stores.CrdCache.List() - for _, obj := range objects { - if crd, ok := obj.(*extv1beta1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(crd); err == nil { - expectations.Crd.AddExpectedDeletion(kvkey, key) - err := ext.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, deleteOptions) - if err != nil { - expectations.Crd.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete crd %+v: %v", crd, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - - } - if !util.IsStoreEmpty(stores.CrdCache) { - // wait until CRDs are gone - return nil - } - - // delete handler daemonset - obj, exists, err := stores.DaemonSetCache.GetByKey(fmt.Sprintf("%s/%s", kv.Namespace, "virt-handler")) - if err != nil { - log.Log.Errorf("Failed to get virt-handler: %v", err) - return err - } else if exists { - if ds, ok := obj.(*appsv1.DaemonSet); ok && ds.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(ds); err == nil { - expectations.DaemonSet.AddExpectedDeletion(kvkey, key) - err := clientset.AppsV1().DaemonSets(kv.Namespace).Delete("virt-handler", deleteOptions) - if err != nil { - expectations.DaemonSet.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete virt-handler: %v", err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - } - - // delete controller and apiserver deployment - for _, name := range []string{"virt-controller", "virt-api"} { - obj, exists, err := stores.DeploymentCache.GetByKey(fmt.Sprintf("%s/%s", kv.Namespace, name)) - if err != nil { - log.Log.Errorf("Failed to get %v: %v", name, err) - return err - } else if exists { - if depl, ok := obj.(*appsv1.Deployment); ok && depl.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(depl); err == nil { - expectations.Deployment.AddExpectedDeletion(kvkey, key) - err := clientset.AppsV1().Deployments(kv.Namespace).Delete(name, deleteOptions) - if err != nil { - expectations.Deployment.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete virt-handler: %v", err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! 
obj: %+v", obj) - return nil - } - } - } - - // delete services - objects = stores.ServiceCache.List() - for _, obj := range objects { - if svc, ok := obj.(*corev1.Service); ok && svc.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(svc); err == nil { - expectations.Service.AddExpectedDeletion(kvkey, key) - err := clientset.CoreV1().Services(kv.Namespace).Delete(svc.Name, deleteOptions) - if err != nil { - expectations.Service.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete service %+v: %v", svc, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - } - - // delete RBAC - objects = stores.ClusterRoleBindingCache.List() - for _, obj := range objects { - if crb, ok := obj.(*rbacv1.ClusterRoleBinding); ok && crb.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(crb); err == nil { - expectations.ClusterRoleBinding.AddExpectedDeletion(kvkey, key) - err := clientset.RbacV1().ClusterRoleBindings().Delete(crb.Name, deleteOptions) - if err != nil { - expectations.ClusterRoleBinding.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete crb %+v: %v", crb, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - } - - objects = stores.ClusterRoleCache.List() - for _, obj := range objects { - if cr, ok := obj.(*rbacv1.ClusterRole); ok && cr.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(cr); err == nil { - expectations.ClusterRole.AddExpectedDeletion(kvkey, key) - err := clientset.RbacV1().ClusterRoles().Delete(cr.Name, deleteOptions) - if err != nil { - expectations.ClusterRole.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete cr %+v: %v", cr, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - } - - objects = stores.RoleBindingCache.List() - for _, obj := range objects { - if rb, ok := obj.(*rbacv1.RoleBinding); ok && rb.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(rb); err == nil { - expectations.RoleBinding.AddExpectedDeletion(kvkey, key) - err := clientset.RbacV1().RoleBindings(kv.Namespace).Delete(rb.Name, deleteOptions) - if err != nil { - expectations.RoleBinding.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete rb %+v: %v", rb, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - } - - objects = stores.RoleCache.List() - for _, obj := range objects { - if role, ok := obj.(*rbacv1.Role); ok && role.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(role); err == nil { - expectations.Role.AddExpectedDeletion(kvkey, key) - err := clientset.RbacV1().Roles(kv.Namespace).Delete(role.Name, deleteOptions) - if err != nil { - expectations.Role.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete role %+v: %v", role, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! 
obj: %+v", obj) - return nil - } - } - - objects = stores.ServiceAccountCache.List() - for _, obj := range objects { - if sa, ok := obj.(*corev1.ServiceAccount); ok && sa.DeletionTimestamp == nil { - if key, err := controller.KeyFunc(sa); err == nil { - expectations.ServiceAccount.AddExpectedDeletion(kvkey, key) - err := clientset.CoreV1().ServiceAccounts(kv.Namespace).Delete(sa.Name, deleteOptions) - if err != nil { - expectations.ServiceAccount.DeletionObserved(kvkey, key) - log.Log.Errorf("Failed to delete serviceaccount %+v: %v", sa, err) - return err - } - } - } else if !ok { - log.Log.Errorf("Cast failed! obj: %+v", obj) - return nil - } - } - - err = util.UpdateScc(clientset, stores.SCCCache, kv, false) - if err != nil { - log.Log.Errorf("Failed to update SCC: %v", err) - return err - } - - return nil - -} diff --git a/pkg/virt-operator/install-strategy/BUILD.bazel b/pkg/virt-operator/install-strategy/BUILD.bazel index a53bd18260ca..ca0f4ed0134f 100644 --- a/pkg/virt-operator/install-strategy/BUILD.bazel +++ b/pkg/virt-operator/install-strategy/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/virt-operator/util:go_default_library", "//tools/util:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/openshift/api/security/v1:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", diff --git a/pkg/virt-operator/kubevirt.go b/pkg/virt-operator/kubevirt.go index ee750b656326..10a89cd7df86 100644 --- a/pkg/virt-operator/kubevirt.go +++ b/pkg/virt-operator/kubevirt.go @@ -38,8 +38,6 @@ import ( "kubevirt.io/kubevirt/pkg/controller" "kubevirt.io/kubevirt/pkg/kubecli" "kubevirt.io/kubevirt/pkg/log" - "kubevirt.io/kubevirt/pkg/virt-operator/creation" - "kubevirt.io/kubevirt/pkg/virt-operator/deletion" "kubevirt.io/kubevirt/pkg/virt-operator/install-strategy" "kubevirt.io/kubevirt/pkg/virt-operator/util" ) @@ -707,7 +705,7 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { c.garbageCollectInstallStrategyJobs() // deploy - objectsAdded, err := creation.Create(kv, c.stores, c.clientset, &c.kubeVirtExpectations, strategy) + objectsAdded, err := installstrategy.CreateAll(kv, strategy, c.stores, c.clientset, &c.kubeVirtExpectations) if err != nil { // deployment failed @@ -775,6 +773,16 @@ func (c *KubeVirtController) syncDeletion(kv *v1.KubeVirt) error { logger := log.Log.Object(kv) logger.Info("Handling deletion") + strategy, pending, err := c.loadInstallStrategy(kv) + if err != nil { + return err + } + + // we're waiting on the job to finish and the config map to be created + if pending { + return nil + } + // set phase to deleting kv.Status.Phase = v1.KubeVirtPhaseDeleting @@ -782,24 +790,24 @@ func (c *KubeVirtController) syncDeletion(kv *v1.KubeVirt) error { util.RemoveCondition(kv, v1.KubeVirtConditionCreated) util.RemoveCondition(kv, v1.KubeVirtConditionReady) - err := deletion.Delete(kv, c.clientset, c.stores, &c.kubeVirtExpectations) + err = installstrategy.DeleteAll(kv, strategy, c.stores, c.clientset, &c.kubeVirtExpectations) if err != nil { // deletion failed util.UpdateCondition(kv, v1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonDeletionFailedError, fmt.Sprintf("An error occurred during deletion: %v", err)) return err } - err = c.deleteAllInstallStrategy() - if err != nil { - // garbage collection of install strategies failed - util.UpdateCondition(kv, 
v1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonDeletionFailedError, fmt.Sprintf("An error occurred during deletion: %v", err))
-		return err
-	}
-
 	util.RemoveCondition(kv, v1.KubeVirtConditionSynchronized)
 
 	if c.stores.AllEmpty() {
 
+		err = c.deleteAllInstallStrategy()
+		if err != nil {
+			// garbage collection of install strategies failed
+			util.UpdateCondition(kv, v1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonDeletionFailedError, fmt.Sprintf("An error occurred during deletion: %v", err))
+			return err
+		}
+
 		// deletion successful
 		kv.Status.Phase = v1.KubeVirtPhaseDeleted
diff --git a/pkg/virt-operator/kubevirt_test.go b/pkg/virt-operator/kubevirt_test.go
index 8a2ce5011b9d..4a464d15608f 100644
--- a/pkg/virt-operator/kubevirt_test.go
+++ b/pkg/virt-operator/kubevirt_test.go
@@ -679,7 +679,7 @@ var _ = Describe("KubeVirt Operator", func() {
 	}
 
 	Context("On valid KubeVirt object", func() {
-		It("should do nothing if KubeVirt object is deleted", func(done Done) {
+		It("should delete install strategy configmap once kubevirt install is deleted", func(done Done) {
 			defer close(done)
 
 			kv := &v1.KubeVirt{
@@ -693,7 +693,10 @@
 			}
 			kv.DeletionTimestamp = now()
 
+			shouldExpectInstallStrategyDeletion()
+
 			addKubeVirt(kv)
+			addInstallStrategy()
 			controller.Execute()
 		}, 15)
 

From 5e3ef213aea275adf307f2bd5ae50298299cb2dd Mon Sep 17 00:00:00 2001
From: David Vossel
Date: Thu, 28 Feb 2019 13:02:02 -0500
Subject: [PATCH 03/13] Add ImageTag and ImageRegistry fields to KubeVirt object

Signed-off-by: David Vossel
---
 pkg/api/v1/openapi_generated.go       | 14 +++++
 pkg/api/v1/types.go                   |  7 +++
 pkg/api/v1/types_swagger_generated.go |  2 +
 pkg/virt-operator/kubevirt.go         | 44 +++++++++++----
 pkg/virt-operator/kubevirt_test.go    | 11 ++--
 pkg/virt-operator/util/client.go      | 79 +--------------------------
 6 files changed, 63 insertions(+), 94 deletions(-)

diff --git a/pkg/api/v1/openapi_generated.go b/pkg/api/v1/openapi_generated.go
index eecbe0626ed6..c8dafa5fd0e4 100644
--- a/pkg/api/v1/openapi_generated.go
+++ b/pkg/api/v1/openapi_generated.go
@@ -1677,6 +1677,20 @@ func schema_kubevirt_pkg_api_v1_KubeVirtSpec(ref common.ReferenceCallback) commo
 		Schema: spec.Schema{
 			SchemaProps: spec.SchemaProps{
 				Properties: map[string]spec.Schema{
+					"imageTag": {
+						SchemaProps: spec.SchemaProps{
+							Description: "The image tag to use for the container images installed. Defaults to the same tag as the operator's container image.",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
+					"imageRegistry": {
+						SchemaProps: spec.SchemaProps{
+							Description: "The image registry to pull the container images from. Defaults to the same registry the operator's container image is pulled from.",
+							Type:        []string{"string"},
+							Format:      "",
+						},
+					},
 					"imagePullPolicy": {
 						SchemaProps: spec.SchemaProps{
 							Description: "The ImagePullPolicy to use.",
diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go
index 928ef11419f9..6bd0d95addd9 100644
--- a/pkg/api/v1/types.go
+++ b/pkg/api/v1/types.go
@@ -1050,6 +1050,13 @@ func (kl *KubeVirtList) GetListMeta() meta.List {
 // ---
 // +k8s:openapi-gen=true
 type KubeVirtSpec struct {
+	// The image tag to use for the container images installed.
+	// Defaults to the same tag as the operator's container image.
+	ImageTag string `json:"imageTag,omitempty"`
+	// The image registry to pull the container images from.
+	// Defaults to the same registry the operator's container image is pulled from.
+	ImageRegistry string `json:"imageRegistry,omitempty"`
+
 	// The ImagePullPolicy to use.
 	ImagePullPolicy k8sv1.PullPolicy `json:"imagePullPolicy,omitempty" valid:"required"`
 }
diff --git a/pkg/api/v1/types_swagger_generated.go b/pkg/api/v1/types_swagger_generated.go
index 108ba7d585c6..07ee1051de90 100644
--- a/pkg/api/v1/types_swagger_generated.go
+++ b/pkg/api/v1/types_swagger_generated.go
@@ -248,6 +248,8 @@ func (KubeVirtList) SwaggerDoc() map[string]string {
 
 func (KubeVirtSpec) SwaggerDoc() map[string]string {
 	return map[string]string{
+		"imageTag":        "The image tag to use for the container images installed.\nDefaults to the same tag as the operator's container image.",
+		"imageRegistry":   "The image registry to pull the container images from.\nDefaults to the same registry the operator's container image is pulled from.",
 		"imagePullPolicy": "The ImagePullPolicy to use.",
 	}
 }
diff --git a/pkg/virt-operator/kubevirt.go b/pkg/virt-operator/kubevirt.go
index 10a89cd7df86..5256ba0ce3db 100644
--- a/pkg/virt-operator/kubevirt.go
+++ b/pkg/virt-operator/kubevirt.go
@@ -454,7 +454,10 @@ func (c *KubeVirtController) execute(key string) error {
 	return syncError
 }
 
-func (c *KubeVirtController) generateInstallStrategyJob(kv *v1.KubeVirt, imageTag string, imageRegistry string) *batchv1.Job {
+func (c *KubeVirtController) generateInstallStrategyJob(kv *v1.KubeVirt) *batchv1.Job {
+
+	imageTag := c.getImageTag(kv)
+	imageRegistry := c.getImageRegistry(kv)
 
 	pullPolicy := k8sv1.PullIfNotPresent
 	if string(kv.Spec.ImagePullPolicy) != "" {
@@ -559,6 +562,20 @@ func (c *KubeVirtController) deleteAllInstallStrategy() error {
 	return nil
 }
 
+func (c *KubeVirtController) getImageTag(kv *v1.KubeVirt) string {
+	if kv.Spec.ImageTag == "" {
+		return c.config.ImageTag
+	}
+	return kv.Spec.ImageTag
+}
+
+func (c *KubeVirtController) getImageRegistry(kv *v1.KubeVirt) string {
+	if kv.Spec.ImageRegistry == "" {
+		return c.config.ImageRegistry
+	}
+	return kv.Spec.ImageRegistry
+}
+
 // Loads install strategies into memory, and generates jobs to
 // create install strategies that don't exist yet.
 func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrategy.InstallStrategy, bool, error) {
@@ -569,19 +586,19 @@
 	}
 
 	// 1. see if we already loaded the install strategy
-	strategy, ok := c.getInstallStrategyFromMap(c.config.ImageTag)
+	strategy, ok := c.getInstallStrategyFromMap(c.getImageTag(kv))
 	if ok {
 		// we already loaded this strategy into memory
 		return strategy, false, nil
 	}
 
 	// 2. look for install strategy config map in cache.
-	strategy, err = installstrategy.LoadInstallStrategyFromCache(c.stores, kv.Namespace, c.config.ImageTag)
+	strategy, err = installstrategy.LoadInstallStrategyFromCache(c.stores, kv.Namespace, c.getImageTag(kv))
 	if err == nil {
 		c.installStrategyMutex.Lock()
 		defer c.installStrategyMutex.Unlock()
-		c.installStrategyMap[c.config.ImageTag] = strategy
-		log.Log.Infof("Loaded install strategy for kubevirt version %s into cache", c.config.ImageTag)
+		c.installStrategyMap[c.getImageTag(kv)] = strategy
+		log.Log.Infof("Loaded install strategy for kubevirt version %s into cache", c.getImageTag(kv))
 		return strategy, false, nil
 	}
 
@@ -589,7 +606,7 @@
 
 	// 3. See if we have a pending job in flight for this install strategy.
batch := c.clientset.BatchV1() - job := c.generateInstallStrategyJob(kv, c.config.ImageTag, c.config.ImageRegistry) + job := c.generateInstallStrategyJob(kv) obj, exists, _ := c.stores.InstallStrategyJobCache.Get(job) if exists { @@ -599,7 +616,7 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat // job completed but we don't have a install strategy still // delete the job and we'll re-execute it once it is removed. - log.Log.Object(cachedJob).Errorf("Job failed to create install strategy for version %s", c.config.ImageTag) + log.Log.Object(cachedJob).Errorf("Job failed to create install strategy for version %s", c.getImageTag(kv)) if cachedJob.DeletionTimestamp == nil { // Just in case there's an issue causing the job to fail @@ -634,7 +651,7 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat return nil, true, err } - log.Log.Object(cachedJob).Errorf("Deleting job for install strategy version %s because configmap was not generated", c.config.ImageTag) + log.Log.Object(cachedJob).Errorf("Deleting job for install strategy version %s because configmap was not generated", c.getImageTag(kv)) } // waiting on deleted job to disappear before re-creating it. return nil, true, err @@ -652,7 +669,7 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat c.kubeVirtExpectations.InstallStrategyJob.LowerExpectations(kvkey, 1, 0) return nil, true, err } - log.Log.Infof("Created job to generate install strategy configmap for version %s", c.config.ImageTag) + log.Log.Infof("Created job to generate install strategy configmap for version %s", c.getImageTag(kv)) // pending is true here because we're waiting on the job // to generate the install strategy @@ -664,9 +681,9 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { logger.Infof("Handling deployment") // Set versions... 
- if kv.Status.OperatorVersion == "" { - util.SetVersions(kv, c.config) - } + util.SetOperatorVersion(kv) + // record the version we're targetting to install + kv.Status.TargetKubeVirtVersion = c.getImageTag(kv) // Set phase to deploying kv.Status.Phase = v1.KubeVirtPhaseDeploying @@ -718,6 +735,9 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { if objectsAdded == 0 { + // record the version just installed + kv.Status.ObservedKubeVirtVersion = c.getImageTag(kv) + // add Created condition util.UpdateCondition(kv, v1.KubeVirtConditionCreated, k8sv1.ConditionTrue, ConditionReasonDeploymentCreated, "All resources were created.") logger.Info("All KubeVirt resources created") diff --git a/pkg/virt-operator/kubevirt_test.go b/pkg/virt-operator/kubevirt_test.go index 4a464d15608f..b37520f46d77 100644 --- a/pkg/virt-operator/kubevirt_test.go +++ b/pkg/virt-operator/kubevirt_test.go @@ -51,6 +51,7 @@ import ( "kubevirt.io/kubevirt/pkg/kubecli" "kubevirt.io/kubevirt/pkg/log" "kubevirt.io/kubevirt/pkg/testutils" + "kubevirt.io/kubevirt/pkg/version" "kubevirt.io/kubevirt/pkg/virt-operator/creation/components" "kubevirt.io/kubevirt/pkg/virt-operator/creation/rbac" "kubevirt.io/kubevirt/pkg/virt-operator/install-strategy" @@ -725,7 +726,9 @@ var _ = Describe("KubeVirt Operator", func() { Message: "All components are ready.", }, }, - OperatorVersion: "v9.9.9", + OperatorVersion: version.Get().String(), + TargetKubeVirtVersion: "v9.9.9", + ObservedKubeVirtVersion: "v9.9.9", }, } @@ -819,7 +822,7 @@ var _ = Describe("KubeVirt Operator", func() { Status: v1.KubeVirtStatus{}, } - job := controller.generateInstallStrategyJob(kv, "v9.9.9", "somerepository") + job := controller.generateInstallStrategyJob(kv) // will only create a new job after 10 seconds has passed. // this is just a simple mechanism to prevent spin loops @@ -850,7 +853,7 @@ var _ = Describe("KubeVirt Operator", func() { Status: v1.KubeVirtStatus{}, } - job := controller.generateInstallStrategyJob(kv, "v9.9.9", "somerepository") + job := controller.generateInstallStrategyJob(kv) job.Status.CompletionTime = now() @@ -876,7 +879,7 @@ var _ = Describe("KubeVirt Operator", func() { addKubeVirt(kv) addInstallStrategy() - job := controller.generateInstallStrategyJob(kv, "v9.9.9", "somerepository") + job := controller.generateInstallStrategyJob(kv) job.Status.CompletionTime = now() addInstallStrategyJob(job) diff --git a/pkg/virt-operator/util/client.go b/pkg/virt-operator/util/client.go index 0364a8215c25..1e88d92f4cc7 100644 --- a/pkg/virt-operator/util/client.go +++ b/pkg/virt-operator/util/client.go @@ -20,14 +20,12 @@ package util import ( - "fmt" "time" secv1 "github.com/openshift/api/security/v1" k8sv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" virtv1 "kubevirt.io/kubevirt/pkg/api/v1" "kubevirt.io/kubevirt/pkg/kubecli" @@ -116,83 +114,8 @@ func hasFinalizer(kv *virtv1.KubeVirt) bool { return false } -func SetVersions(kv *virtv1.KubeVirt, config KubeVirtDeploymentConfig) { - +func SetOperatorVersion(kv *virtv1.KubeVirt) { kv.Status.OperatorVersion = version.Get().String() - - // Note: for now we just set targetKubeVirtVersion and observedKubeVirtVersion to the tag of the operator image - // In future this needs some more work... 
- kv.Status.TargetKubeVirtVersion = config.ImageTag - kv.Status.ObservedKubeVirtVersion = config.ImageTag - -} - -func UpdateScc(clientset kubecli.KubevirtClient, sccStore cache.Store, kv *virtv1.KubeVirt, add bool) error { - - privSccObj, exists, err := sccStore.GetByKey("privileged") - if !exists { - return nil - } else if err != nil { - return err - } - - privScc, ok := privSccObj.(*secv1.SecurityContextConstraints) - if !ok { - return fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSccObj) - } - privSccCopy := privScc.DeepCopy() - - var kubeVirtAccounts []string - prefix := "system:serviceaccount" - kubeVirtAccounts = append(kubeVirtAccounts, fmt.Sprintf("%s:%s:%s", prefix, kv.Namespace, "kubevirt-handler")) - kubeVirtAccounts = append(kubeVirtAccounts, fmt.Sprintf("%s:%s:%s", prefix, kv.Namespace, "kubevirt-apiserver")) - kubeVirtAccounts = append(kubeVirtAccounts, fmt.Sprintf("%s:%s:%s", prefix, kv.Namespace, "kubevirt-controller")) - - modified := false - users := privSccCopy.Users - for _, acc := range kubeVirtAccounts { - if add { - if !contains(users, acc) { - users = append(users, acc) - modified = true - } - } else { - removed := false - users, removed = remove(users, acc) - modified = modified || removed - } - } - if modified { - privSccCopy.Users = users - _, err = clientset.SecClient().SecurityContextConstraints().Update(privSccCopy) - if err != nil { - return fmt.Errorf("unable to update scc: %v", err) - } - } - - return nil -} - -func contains(users []string, user string) bool { - for _, u := range users { - if u == user { - return true - } - } - return false -} - -func remove(users []string, user string) ([]string, bool) { - var newUsers []string - modified := false - for _, u := range users { - if u != user { - newUsers = append(newUsers, u) - } else { - modified = true - } - } - return newUsers, modified } func IsOnOpenshift(clientset kubecli.KubevirtClient) (bool, error) { From 911e693897cf747d3ce58bd2904ed9ebfde0f19c Mon Sep 17 00:00:00 2001 From: David Vossel Date: Thu, 28 Feb 2019 15:17:18 -0500 Subject: [PATCH 04/13] operator image tag unit test Signed-off-by: David Vossel --- pkg/virt-operator/BUILD.bazel | 1 + pkg/virt-operator/kubevirt_test.go | 79 +++++++++++++++++++++++++----- 2 files changed, 68 insertions(+), 12 deletions(-) diff --git a/pkg/virt-operator/BUILD.bazel b/pkg/virt-operator/BUILD.bazel index 76dfa2ec1488..a1ba430eff93 100644 --- a/pkg/virt-operator/BUILD.bazel +++ b/pkg/virt-operator/BUILD.bazel @@ -46,6 +46,7 @@ go_test( "//pkg/kubecli:go_default_library", "//pkg/log:go_default_library", "//pkg/testutils:go_default_library", + "//pkg/version:go_default_library", "//pkg/virt-operator/creation/components:go_default_library", "//pkg/virt-operator/creation/rbac:go_default_library", "//pkg/virt-operator/install-strategy:go_default_library", diff --git a/pkg/virt-operator/kubevirt_test.go b/pkg/virt-operator/kubevirt_test.go index b37520f46d77..133b367deb89 100644 --- a/pkg/virt-operator/kubevirt_test.go +++ b/pkg/virt-operator/kubevirt_test.go @@ -93,7 +93,9 @@ var _ = Describe("KubeVirt Operator", func() { var informers util.Informers var stores util.Stores - os.Setenv(util.OperatorImageEnvName, "somerepository/virt-operator:v9.9.9") + defaultImageTag := "v9.9.9" + defaultRegistry := "someregistry" + os.Setenv(util.OperatorImageEnvName, fmt.Sprintf("%s/virt-operator:%s", defaultRegistry, defaultImageTag)) var totalAdds int var totalDeletions int @@ -344,14 +346,10 @@ var _ = Describe("KubeVirt Operator", func() { } } - 
addInstallStrategy := func() { - repository := "somerepository" - version := "v9.9.9" - + addInstallStrategy := func(imageTag string, imageRegistry string) { // install strategy config - resource, _ := installstrategy.NewInstallStrategyConfigMap(NAMESPACE, version, repository) + resource, _ := installstrategy.NewInstallStrategyConfigMap(NAMESPACE, imageTag, imageRegistry) addResource(resource) - } addAll := func() { @@ -363,9 +361,6 @@ var _ = Describe("KubeVirt Operator", func() { all := make([]interface{}, 0) - // install strategy config - addInstallStrategy() - // rbac all = append(all, rbac.GetAllCluster(NAMESPACE)...) all = append(all, rbac.GetAllApiServer(NAMESPACE)...) @@ -652,6 +647,7 @@ var _ = Describe("KubeVirt Operator", func() { kubeClient.Fake.PrependReactor("create", "deployments", genericCreateFunc) kubeClient.Fake.PrependReactor("create", "daemonsets", genericCreateFunc) } + shouldExpectKubeVirtUpdate := func(times int) { update := kvInterface.EXPECT().Update(gomock.Any()) update.Do(func(kv *v1.KubeVirt) { @@ -660,6 +656,17 @@ var _ = Describe("KubeVirt Operator", func() { }).Times(times) } + shouldExpectKubeVirtUpdateVersion := func(times int, imageTag string) { + update := kvInterface.EXPECT().Update(gomock.Any()) + update.Do(func(kv *v1.KubeVirt) { + + Expect(kv.Status.TargetKubeVirtVersion).To(Equal(imageTag)) + Expect(kv.Status.ObservedKubeVirtVersion).To(Equal(imageTag)) + kvInformer.GetStore().Update(kv) + update.Return(kv, nil) + }).Times(times) + } + shouldExpectKubeVirtUpdateFailureCondition := func() { update := kvInterface.EXPECT().Update(gomock.Any()) update.Do(func(kv *v1.KubeVirt) { @@ -697,8 +704,54 @@ var _ = Describe("KubeVirt Operator", func() { shouldExpectInstallStrategyDeletion() addKubeVirt(kv) - addInstallStrategy() + addInstallStrategy(defaultImageTag, defaultRegistry) + controller.Execute() + }, 15) + + It("should observe custom image tag in status during deploy", func(done Done) { + defer close(done) + + kv := &v1.KubeVirt{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-install", + Namespace: NAMESPACE, + Finalizers: []string{util.KubeVirtFinalizer}, + }, + Spec: v1.KubeVirtSpec{ + ImageTag: "custom.tag", + }, + Status: v1.KubeVirtStatus{ + Phase: v1.KubeVirtPhaseDeployed, + Conditions: []v1.KubeVirtCondition{ + { + Type: v1.KubeVirtConditionCreated, + Status: k8sv1.ConditionTrue, + Reason: ConditionReasonDeploymentCreated, + Message: "All resources were created.", + }, + { + Type: v1.KubeVirtConditionReady, + Status: k8sv1.ConditionTrue, + Reason: ConditionReasonDeploymentReady, + Message: "All components are ready.", + }, + }, + OperatorVersion: version.Get().String(), + }, + } + + // create all resources which should already exist + addKubeVirt(kv) + addAll() + // install strategy config + addInstallStrategy("custom.tag", defaultRegistry) + + makeApiAndControllerReady() + makeHandlerReady() + + shouldExpectKubeVirtUpdateVersion(1, "custom.tag") controller.Execute() + }, 15) It("should do nothing if KubeVirt object is deployed", func(done Done) { @@ -734,6 +787,7 @@ var _ = Describe("KubeVirt Operator", func() { // create all resources which should already exist addKubeVirt(kv) + addInstallStrategy(defaultImageTag, defaultRegistry) addAll() makeApiAndControllerReady() makeHandlerReady() @@ -877,7 +931,7 @@ var _ = Describe("KubeVirt Operator", func() { }, } addKubeVirt(kv) - addInstallStrategy() + addInstallStrategy(defaultImageTag, defaultRegistry) job := controller.generateInstallStrategyJob(kv) @@ -969,6 +1023,7 @@ var _ = Describe("KubeVirt 
Operator", func() { addKubeVirt(kv) // create all resources which should be deleted + addInstallStrategy(defaultImageTag, defaultRegistry) addAll() shouldExpectKubeVirtUpdate(1) From 21060d45448a5cdb6c95f341908bc394e1d9902a Mon Sep 17 00:00:00 2001 From: David Vossel Date: Fri, 1 Mar 2019 11:32:35 -0500 Subject: [PATCH 05/13] Observe both image tag and image registry versions during operator install Signed-off-by: David Vossel --- pkg/api/v1/openapi_generated.go | 12 ++++ pkg/api/v1/types.go | 21 ++++--- pkg/controller/virtinformers.go | 4 +- .../install-strategy/strategy.go | 57 +++++++++++++------ pkg/virt-operator/kubevirt.go | 32 +++++++---- pkg/virt-operator/kubevirt_test.go | 47 +++++---------- 6 files changed, 103 insertions(+), 70 deletions(-) diff --git a/pkg/api/v1/openapi_generated.go b/pkg/api/v1/openapi_generated.go index c8dafa5fd0e4..52b8f73e4ac7 100644 --- a/pkg/api/v1/openapi_generated.go +++ b/pkg/api/v1/openapi_generated.go @@ -1741,12 +1741,24 @@ func schema_kubevirt_pkg_api_v1_KubeVirtStatus(ref common.ReferenceCallback) com Format: "", }, }, + "targetKubeVirtRegistry": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, "observedKubeVirtVersion": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, Format: "", }, }, + "observedKubeVirtRegistry": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 6bd0d95addd9..925d5e6df30e 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -447,8 +447,13 @@ const ( // This label will be set on all resources created by the operator ManagedByLabel = "app.kubernetes.io/managed-by" ManagedByLabelOperatorValue = "kubevirt-operator" - // This label represents the kubevirt version for an install strategy configmap. - InstallStrategyVersionLabel = "kubevirt.io/install-strategy-version" + // This annotation represents the kubevirt version for an install strategy configmap. + InstallStrategyVersionAnnotation = "kubevirt.io/install-strategy-version" + // This annotation represents the kubevirt registry used for an install strategy configmap. + InstallStrategyRegistryAnnotation = "kubevirt.io/install-strategy-registry" + + // This label indicates the object is a part of the install strategy retrieval process. 
+ InstallStrategyLabel = "kubevirt.io/install-strategy" VirtualMachineInstanceFinalizer string = "foregroundDeleteVirtualMachine" CPUManager string = "cpumanager" @@ -1065,11 +1070,13 @@ type KubeVirtSpec struct { // --- // +k8s:openapi-gen=true type KubeVirtStatus struct { - Phase KubeVirtPhase `json:"phase,omitempty"` - Conditions []KubeVirtCondition `json:"conditions,omitempty" optional:"true"` - OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"` - TargetKubeVirtVersion string `json:"targetKubeVirtVersion,omitempty" optional:"true"` - ObservedKubeVirtVersion string `json:"observedKubeVirtVersion,omitempty" optional:"true"` + Phase KubeVirtPhase `json:"phase,omitempty"` + Conditions []KubeVirtCondition `json:"conditions,omitempty" optional:"true"` + OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"` + TargetKubeVirtVersion string `json:"targetKubeVirtVersion,omitempty" optional:"true"` + TargetKubeVirtRegistry string `json:"targetKubeVirtRegistry,omitempty" optional:"true"` + ObservedKubeVirtVersion string `json:"observedKubeVirtVersion,omitempty" optional:"true"` + ObservedKubeVirtRegistry string `json:"observedKubeVirtRegistry,omitempty" optional:"true"` } // KubeVirtPhase is a label for the phase of a KubeVirt deployment at the current time. diff --git a/pkg/controller/virtinformers.go b/pkg/controller/virtinformers.go index 481918aabb3a..554182e47b46 100644 --- a/pkg/controller/virtinformers.go +++ b/pkg/controller/virtinformers.go @@ -429,7 +429,7 @@ func (f *kubeInformerFactory) DummyOperatorSCC() cache.SharedIndexInformer { func (f *kubeInformerFactory) OperatorInstallStrategyConfigMaps() cache.SharedIndexInformer { return f.getInformer("installStrategyConfigMapInformer", func() cache.SharedIndexInformer { - labelSelector, err := labels.Parse(kubev1.InstallStrategyVersionLabel) + labelSelector, err := labels.Parse(kubev1.InstallStrategyLabel) if err != nil { panic(err) } @@ -441,7 +441,7 @@ func (f *kubeInformerFactory) OperatorInstallStrategyConfigMaps() cache.SharedIn func (f *kubeInformerFactory) OperatorInstallStrategyJob() cache.SharedIndexInformer { return f.getInformer("installStrategyJobsInformer", func() cache.SharedIndexInformer { - labelSelector, err := labels.Parse(kubev1.InstallStrategyVersionLabel) + labelSelector, err := labels.Parse(kubev1.InstallStrategyLabel) if err != nil { panic(err) } diff --git a/pkg/virt-operator/install-strategy/strategy.go b/pkg/virt-operator/install-strategy/strategy.go index 000d835cd475..e66785ce13fb 100644 --- a/pkg/virt-operator/install-strategy/strategy.go +++ b/pkg/virt-operator/install-strategy/strategy.go @@ -79,10 +79,6 @@ type InstallStrategy struct { customSccPrivileges []*customSccPrivilegedAccounts } -func generateConfigMapName(imageTag string) string { - return fmt.Sprintf("kubevirt-install-strategy-%s", imageTag) -} - func NewInstallStrategyConfigMap(namespace string, imageTag string, imageRegistry string) (*corev1.ConfigMap, error) { strategy, err := GenerateCurrentInstallStrategy( @@ -97,11 +93,15 @@ func NewInstallStrategyConfigMap(namespace string, imageTag string, imageRegistr configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: generateConfigMapName(imageTag), - Namespace: namespace, + GenerateName: "kubevirt-install-strategy-", + Namespace: namespace, Labels: map[string]string{ - v1.ManagedByLabel: v1.ManagedByLabelOperatorValue, - v1.InstallStrategyVersionLabel: imageTag, + v1.ManagedByLabel: v1.ManagedByLabelOperatorValue, + v1.InstallStrategyLabel: 
"", + }, + Annotations: map[string]string{ + v1.InstallStrategyVersionAnnotation: imageTag, + v1.InstallStrategyRegistryAnnotation: imageRegistry, }, }, Data: map[string]string{ @@ -267,21 +267,42 @@ func GenerateCurrentInstallStrategy(namespace string, return strategy, nil } -func LoadInstallStrategyFromCache(stores util.Stores, namespace string, imageTag string) (*InstallStrategy, error) { +func LoadInstallStrategyFromCache(stores util.Stores, namespace string, imageTag string, imageRegistry string) (*InstallStrategy, error) { + var configMap *corev1.ConfigMap + var matchingConfigMaps []*corev1.ConfigMap - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: generateConfigMapName(imageTag), - Namespace: namespace, - }, + for _, obj := range stores.InstallStrategyConfigMapCache.List() { + config, ok := obj.(*corev1.ConfigMap) + if !ok { + continue + } + if config.ObjectMeta.Annotations == nil { + continue + } + + version, _ := config.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation] + registry, _ := config.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation] + if version == imageTag && registry == imageRegistry { + matchingConfigMaps = append(matchingConfigMaps, config) + } + } + + if len(matchingConfigMaps) == 0 { + return nil, fmt.Errorf("no install strategy configmap found for version %s with registry %s", imageTag, imageRegistry) } - obj, exists, _ := stores.InstallStrategyConfigMapCache.Get(configMap) - if !exists { - return nil, fmt.Errorf("no install strategy configmap found for version %s", imageTag) + // choose the most recent configmap if multiple match. + mostRecentTime := metav1.Time{} + for _, config := range matchingConfigMaps { + if configMap == nil { + configMap = config + mostRecentTime = config.ObjectMeta.CreationTimestamp + } else if mostRecentTime.Before(&config.ObjectMeta.CreationTimestamp) { + configMap = config + mostRecentTime = config.ObjectMeta.CreationTimestamp + } } - configMap = obj.(*corev1.ConfigMap) data, ok := configMap.Data["manifests"] if !ok { return nil, fmt.Errorf("install strategy configmap %s does not contain 'manifests' key", configMap.Name) diff --git a/pkg/virt-operator/kubevirt.go b/pkg/virt-operator/kubevirt.go index 5256ba0ce3db..6b75b7600e92 100644 --- a/pkg/virt-operator/kubevirt.go +++ b/pkg/virt-operator/kubevirt.go @@ -473,9 +473,13 @@ func (c *KubeVirtController) generateInstallStrategyJob(kv *v1.KubeVirt) *batchv Namespace: kv.Namespace, Name: fmt.Sprintf("virt-install-strategy-job-%s", imageTag), Labels: map[string]string{ - v1.AppLabel: "", - v1.ManagedByLabel: v1.ManagedByLabelOperatorValue, - v1.InstallStrategyVersionLabel: imageTag, + v1.AppLabel: "", + v1.ManagedByLabel: v1.ManagedByLabelOperatorValue, + v1.InstallStrategyLabel: "", + }, + Annotations: map[string]string{ + v1.InstallStrategyVersionAnnotation: imageTag, + v1.InstallStrategyRegistryAnnotation: imageRegistry, }, }, Spec: batchv1.JobSpec{ @@ -535,14 +539,22 @@ func (c *KubeVirtController) garbageCollectInstallStrategyJobs() error { return nil } -func (c *KubeVirtController) getInstallStrategyFromMap(version string) (*installstrategy.InstallStrategy, bool) { +func (c *KubeVirtController) getInstallStrategyFromMap(version string, registry string) (*installstrategy.InstallStrategy, bool) { c.installStrategyMutex.Lock() defer c.installStrategyMutex.Unlock() - strategy, ok := c.installStrategyMap[version] + strategy, ok := c.installStrategyMap[fmt.Sprintf("%s/%s", registry, version)] return strategy, ok } +func (c *KubeVirtController) 
cacheInstallStrategyInMap(strategy *installstrategy.InstallStrategy, version string, registry string) { + + c.installStrategyMutex.Lock() + defer c.installStrategyMutex.Unlock() + c.installStrategyMap[fmt.Sprintf("%s/%s", registry, version)] = strategy + +} + func (c *KubeVirtController) deleteAllInstallStrategy() error { for _, obj := range c.stores.InstallStrategyConfigMapCache.List() { @@ -586,18 +598,16 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat } // 1. see if we already loaded the install strategy - strategy, ok := c.getInstallStrategyFromMap(c.getImageTag(kv)) + strategy, ok := c.getInstallStrategyFromMap(c.getImageTag(kv), c.getImageRegistry(kv)) if ok { // we already loaded this strategy into memory return strategy, false, nil } // 2. look for install strategy config map in cache. - strategy, err = installstrategy.LoadInstallStrategyFromCache(c.stores, kv.Namespace, c.getImageTag(kv)) + strategy, err = installstrategy.LoadInstallStrategyFromCache(c.stores, kv.Namespace, c.getImageTag(kv), c.getImageRegistry(kv)) if err == nil { - c.installStrategyMutex.Lock() - defer c.installStrategyMutex.Unlock() - c.installStrategyMap[c.getImageTag(kv)] = strategy + c.cacheInstallStrategyInMap(strategy, c.getImageTag(kv), c.getImageRegistry(kv)) log.Log.Infof("Loaded install strategy for kubevirt version %s into cache", c.getImageTag(kv)) return strategy, false, nil } @@ -684,6 +694,7 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { util.SetOperatorVersion(kv) // record the version we're targetting to install kv.Status.TargetKubeVirtVersion = c.getImageTag(kv) + kv.Status.TargetKubeVirtRegistry = c.getImageRegistry(kv) // Set phase to deploying kv.Status.Phase = v1.KubeVirtPhaseDeploying @@ -737,6 +748,7 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { // record the version just installed kv.Status.ObservedKubeVirtVersion = c.getImageTag(kv) + kv.Status.ObservedKubeVirtRegistry = c.getImageRegistry(kv) // add Created condition util.UpdateCondition(kv, v1.KubeVirtConditionCreated, k8sv1.ConditionTrue, ConditionReasonDeploymentCreated, "All resources were created.") diff --git a/pkg/virt-operator/kubevirt_test.go b/pkg/virt-operator/kubevirt_test.go index 133b367deb89..f04286447c27 100644 --- a/pkg/virt-operator/kubevirt_test.go +++ b/pkg/virt-operator/kubevirt_test.go @@ -37,10 +37,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" extclientfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" @@ -353,8 +351,8 @@ var _ = Describe("KubeVirt Operator", func() { } addAll := func() { - repository := "somerepository" - version := "v9.9.9" + repository := defaultRegistry + version := defaultImageTag pullPolicy := "IfNotPresent" imagePullPolicy := k8sv1.PullPolicy(pullPolicy) verbosity := "2" @@ -779,9 +777,11 @@ var _ = Describe("KubeVirt Operator", func() { Message: "All components are ready.", }, }, - OperatorVersion: version.Get().String(), - TargetKubeVirtVersion: "v9.9.9", - ObservedKubeVirtVersion: "v9.9.9", + OperatorVersion: version.Get().String(), + TargetKubeVirtVersion: defaultImageTag, + TargetKubeVirtRegistry: defaultRegistry, + ObservedKubeVirtVersion: defaultImageTag, 
+ ObservedKubeVirtRegistry: defaultRegistry, }, } @@ -1052,43 +1052,24 @@ var _ = Describe("KubeVirt Operator", func() { Expect(ok).To(BeTrue()) configMap := create.GetObject().(*k8sv1.ConfigMap) - Expect(configMap.Name).To(Equal("kubevirt-install-strategy-v9.9.9")) + Expect(configMap.GenerateName).To(Equal("kubevirt-install-strategy-")) - _, ok = configMap.Data["manifests"] + version, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation] Expect(ok).To(BeTrue()) - return true, create.GetObject(), nil - }) - - // This generates and posts the install strategy config map - installstrategy.DumpInstallStrategyToConfigMap(virtClient) - }, 15) - - It("should update an existing install strategy config map", func(done Done) { - defer close(done) - - kubeClient.Fake.PrependReactor("create", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) { - create, ok := action.(testing.CreateAction) - Expect(ok).To(BeTrue()) + Expect(version).To(Equal(defaultImageTag)) - configMap := create.GetObject().(*k8sv1.ConfigMap) - Expect(configMap.Name).To(Equal("kubevirt-install-strategy-v9.9.9")) - return true, nil, errors.NewAlreadyExists(schema.GroupResource{}, configMap.Name) - }) - kubeClient.Fake.PrependReactor("update", "configmaps", func(action testing.Action) (handled bool, obj runtime.Object, err error) { - update, ok := action.(testing.UpdateAction) + registry, ok := configMap.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation] + Expect(registry).To(Equal(defaultRegistry)) Expect(ok).To(BeTrue()) - configMap := update.GetObject().(*k8sv1.ConfigMap) - Expect(configMap.Name).To(Equal("kubevirt-install-strategy-v9.9.9")) - _, ok = configMap.Data["manifests"] Expect(ok).To(BeTrue()) - return true, update.GetObject(), nil + return true, create.GetObject(), nil }) - // This should update an already existing install strategy + // This generates and posts the install strategy config map installstrategy.DumpInstallStrategyToConfigMap(virtClient) }, 15) }) From 8377063ca0c450ef013f1c7527eb344a449da856 Mon Sep 17 00:00:00 2001 From: David Vossel Date: Fri, 1 Mar 2019 13:05:36 -0500 Subject: [PATCH 06/13] pick the most recent install strategy matching the version/registry Signed-off-by: David Vossel --- .../install-strategy/strategy.go | 27 ++++++++++------- .../install-strategy/strategy_test.go | 29 +++++++++++++++++++ 2 files changed, 46 insertions(+), 10 deletions(-) diff --git a/pkg/virt-operator/install-strategy/strategy.go b/pkg/virt-operator/install-strategy/strategy.go index e66785ce13fb..052f9bb95527 100644 --- a/pkg/virt-operator/install-strategy/strategy.go +++ b/pkg/virt-operator/install-strategy/strategy.go @@ -267,6 +267,22 @@ func GenerateCurrentInstallStrategy(namespace string, return strategy, nil } +func mostRecentConfigMap(configMaps []*corev1.ConfigMap) *corev1.ConfigMap { + var configMap *corev1.ConfigMap + // choose the most recent configmap if multiple match. 
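+	// The first entry seeds the selection; after that a config map only replaces
+	// the current pick when its CreationTimestamp is strictly newer, so ties
+	// keep the earliest match in the slice.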
+ mostRecentTime := metav1.Time{} + for _, config := range configMaps { + if configMap == nil { + configMap = config + mostRecentTime = config.ObjectMeta.CreationTimestamp + } else if mostRecentTime.Before(&config.ObjectMeta.CreationTimestamp) { + configMap = config + mostRecentTime = config.ObjectMeta.CreationTimestamp + } + } + return configMap +} + func LoadInstallStrategyFromCache(stores util.Stores, namespace string, imageTag string, imageRegistry string) (*InstallStrategy, error) { var configMap *corev1.ConfigMap var matchingConfigMaps []*corev1.ConfigMap @@ -292,16 +308,7 @@ func LoadInstallStrategyFromCache(stores util.Stores, namespace string, imageTag } // choose the most recent configmap if multiple match. - mostRecentTime := metav1.Time{} - for _, config := range matchingConfigMaps { - if configMap == nil { - configMap = config - mostRecentTime = config.ObjectMeta.CreationTimestamp - } else if mostRecentTime.Before(&config.ObjectMeta.CreationTimestamp) { - configMap = config - mostRecentTime = config.ObjectMeta.CreationTimestamp - } - } + configMap = mostRecentConfigMap(matchingConfigMaps) data, ok := configMap.Data["manifests"] if !ok { diff --git a/pkg/virt-operator/install-strategy/strategy_test.go b/pkg/virt-operator/install-strategy/strategy_test.go index 1426b074baf0..33e1b06e6bcb 100644 --- a/pkg/virt-operator/install-strategy/strategy_test.go +++ b/pkg/virt-operator/install-strategy/strategy_test.go @@ -31,6 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var _ = Describe("Install Strategy", func() { @@ -154,4 +155,32 @@ var _ = Describe("Install Strategy", func() { } }) }) + + Context("should match", func() { + It("the most recent install strategy.", func() { + var configMaps []*corev1.ConfigMap + + configMaps = append(configMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + CreationTimestamp: metav1.Time{}, + }, + }) + configMaps = append(configMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test2", + CreationTimestamp: metav1.Now(), + }, + }) + configMaps = append(configMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test3", + CreationTimestamp: metav1.Time{}, + }, + }) + + configMap := mostRecentConfigMap(configMaps) + Expect(configMap.Name).To(Equal("test2")) + }) + }) }) From e47fa363c3573c6508b124640a32c7f0a9efd20b Mon Sep 17 00:00:00 2001 From: David Vossel Date: Fri, 1 Mar 2019 14:12:13 -0500 Subject: [PATCH 07/13] Deny attempts to update kubevirt install until updates are implemented Signed-off-by: David Vossel --- pkg/virt-operator/kubevirt.go | 56 ++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 18 deletions(-) diff --git a/pkg/virt-operator/kubevirt.go b/pkg/virt-operator/kubevirt.go index 6b75b7600e92..ad82449cfc0a 100644 --- a/pkg/virt-operator/kubevirt.go +++ b/pkg/virt-operator/kubevirt.go @@ -43,11 +43,12 @@ import ( ) const ( - ConditionReasonDeploymentFailedExisting = "ExistingDeployment" - ConditionReasonDeploymentFailedError = "DeploymentFailed" - ConditionReasonDeletionFailedError = "DeletionFailed" - ConditionReasonDeploymentCreated = "AllResourcesCreated" - ConditionReasonDeploymentReady = "AllComponentsReady" + ConditionReasonDeploymentFailedExisting = "ExistingDeployment" + ConditionReasonDeploymentFailedError = "DeploymentFailed" + ConditionReasonDeletionFailedError = "DeletionFailed" + 
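+	// ConditionReasonUpdateNotImplementedError is reported when the KubeVirt CR
+	// requests a different image tag or registry than the one currently targeted,
+	// because updates are not implemented yet.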
ConditionReasonUpdateNotImplementedError = "UpdatesNotImplemented" + ConditionReasonDeploymentCreated = "AllResourcesCreated" + ConditionReasonDeploymentReady = "AllComponentsReady" ) type KubeVirtController struct { @@ -575,14 +576,18 @@ func (c *KubeVirtController) deleteAllInstallStrategy() error { } func (c *KubeVirtController) getImageTag(kv *v1.KubeVirt) string { - if kv.Spec.ImageTag == "" { + if kv.Status.TargetKubeVirtVersion != "" { + return kv.Status.TargetKubeVirtVersion + } else if kv.Spec.ImageTag == "" { return c.config.ImageTag } return kv.Spec.ImageTag } func (c *KubeVirtController) getImageRegistry(kv *v1.KubeVirt) string { - if kv.Spec.ImageRegistry == "" { + if kv.Status.TargetKubeVirtRegistry != "" { + return kv.Status.TargetKubeVirtRegistry + } else if kv.Spec.ImageRegistry == "" { return c.config.ImageRegistry } return kv.Spec.ImageRegistry @@ -590,7 +595,7 @@ func (c *KubeVirtController) getImageRegistry(kv *v1.KubeVirt) string { // Loads install strategies into memory, and generates jobs to // create install strategies that don't exist yet. -func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrategy.InstallStrategy, bool, error) { +func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt, imageTag string, registry string) (*installstrategy.InstallStrategy, bool, error) { kvkey, err := controller.KeyFunc(kv) if err != nil { @@ -598,17 +603,17 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat } // 1. see if we already loaded the install strategy - strategy, ok := c.getInstallStrategyFromMap(c.getImageTag(kv), c.getImageRegistry(kv)) + strategy, ok := c.getInstallStrategyFromMap(imageTag, registry) if ok { // we already loaded this strategy into memory return strategy, false, nil } // 2. look for install strategy config map in cache. - strategy, err = installstrategy.LoadInstallStrategyFromCache(c.stores, kv.Namespace, c.getImageTag(kv), c.getImageRegistry(kv)) + strategy, err = installstrategy.LoadInstallStrategyFromCache(c.stores, kv.Namespace, imageTag, registry) if err == nil { - c.cacheInstallStrategyInMap(strategy, c.getImageTag(kv), c.getImageRegistry(kv)) - log.Log.Infof("Loaded install strategy for kubevirt version %s into cache", c.getImageTag(kv)) + c.cacheInstallStrategyInMap(strategy, imageTag, registry) + log.Log.Infof("Loaded install strategy for kubevirt version %s into cache", imageTag) return strategy, false, nil } @@ -626,7 +631,7 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat // job completed but we don't have a install strategy still // delete the job and we'll re-execute it once it is removed. - log.Log.Object(cachedJob).Errorf("Job failed to create install strategy for version %s", c.getImageTag(kv)) + log.Log.Object(cachedJob).Errorf("Job failed to create install strategy for version %s", imageTag) if cachedJob.DeletionTimestamp == nil { // Just in case there's an issue causing the job to fail @@ -661,7 +666,7 @@ func (c *KubeVirtController) loadInstallStrategy(kv *v1.KubeVirt) (*installstrat return nil, true, err } - log.Log.Object(cachedJob).Errorf("Deleting job for install strategy version %s because configmap was not generated", c.getImageTag(kv)) + log.Log.Object(cachedJob).Errorf("Deleting job for install strategy version %s because configmap was not generated", imageTag) } // waiting on deleted job to disappear before re-creating it. 
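+	// The second return value is the "pending" flag read at the call sites:
+	// true means no usable install strategy is available yet.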
return nil, true, err @@ -693,8 +698,23 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { // Set versions... util.SetOperatorVersion(kv) // record the version we're targetting to install - kv.Status.TargetKubeVirtVersion = c.getImageTag(kv) - kv.Status.TargetKubeVirtRegistry = c.getImageRegistry(kv) + if kv.Status.TargetKubeVirtVersion == "" { + kv.Status.TargetKubeVirtVersion = c.getImageTag(kv) + } + if kv.Status.TargetKubeVirtRegistry == "" { + kv.Status.TargetKubeVirtRegistry = c.getImageRegistry(kv) + } + + // TODO once updates are enabled, we'll allow transitioning between image tags. + // for now, we don't support this though. + if kv.Spec.ImageTag != "" && kv.Spec.ImageTag != kv.Status.TargetKubeVirtVersion { + util.UpdateCondition(kv, v1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonUpdateNotImplementedError, fmt.Sprintf("Unable to update to image tag %s because updates are not yet supported", kv.Spec.ImageTag)) + return nil + } + if kv.Spec.ImageRegistry != "" && kv.Spec.ImageRegistry != kv.Status.TargetKubeVirtRegistry { + util.UpdateCondition(kv, v1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonUpdateNotImplementedError, fmt.Sprintf("Unable to update to image in registry %s because updates are not yet supported", kv.Spec.ImageRegistry)) + return nil + } // Set phase to deploying kv.Status.Phase = v1.KubeVirtPhaseDeploying @@ -718,7 +738,7 @@ func (c *KubeVirtController) syncDeployment(kv *v1.KubeVirt) error { // add finalizer to prevent deletion of CR before KubeVirt was undeployed util.AddFinalizer(kv) - strategy, pending, err := c.loadInstallStrategy(kv) + strategy, pending, err := c.loadInstallStrategy(kv, c.getImageTag(kv), c.getImageRegistry(kv)) if err != nil { return err } @@ -805,7 +825,7 @@ func (c *KubeVirtController) syncDeletion(kv *v1.KubeVirt) error { logger := log.Log.Object(kv) logger.Info("Handling deletion") - strategy, pending, err := c.loadInstallStrategy(kv) + strategy, pending, err := c.loadInstallStrategy(kv, c.getImageTag(kv), c.getImageRegistry(kv)) if err != nil { return err } From 4edb657c9ba7d2f68ddd1a3cb431e7aa0a232e6c Mon Sep 17 00:00:00 2001 From: David Vossel Date: Fri, 1 Mar 2019 14:12:47 -0500 Subject: [PATCH 08/13] unit tests for preventing updates until update logic is implemented Signed-off-by: David Vossel --- pkg/virt-operator/kubevirt_test.go | 56 ++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 3 deletions(-) diff --git a/pkg/virt-operator/kubevirt_test.go b/pkg/virt-operator/kubevirt_test.go index f04286447c27..b8ced720c7c5 100644 --- a/pkg/virt-operator/kubevirt_test.go +++ b/pkg/virt-operator/kubevirt_test.go @@ -665,11 +665,11 @@ var _ = Describe("KubeVirt Operator", func() { }).Times(times) } - shouldExpectKubeVirtUpdateFailureCondition := func() { + shouldExpectKubeVirtUpdateFailureCondition := func(reason string) { update := kvInterface.EXPECT().Update(gomock.Any()) update.Do(func(kv *v1.KubeVirt) { Expect(len(kv.Status.Conditions)).To(Equal(1)) - Expect(kv.Status.Conditions[0].Reason).To(Equal(ConditionReasonDeploymentFailedExisting)) + Expect(kv.Status.Conditions[0].Reason).To(Equal(reason)) kvInformer.GetStore().Update(kv) update.Return(kv, nil) }).Times(1) @@ -796,6 +796,56 @@ var _ = Describe("KubeVirt Operator", func() { }, 15) + // TODO this test case will be removed once updates are implemented + It("shouldn't allow updating kubevirt image tag until updates are implemented", func(done Done) { + defer close(done) + + kv := &v1.KubeVirt{ + 
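+				// This CR requests image tag 1.1.1 while 2.2.2.2 is already the
+				// deployment target, so the controller should report the
+				// UpdatesNotImplemented failure condition instead of deploying.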
ObjectMeta: metav1.ObjectMeta{ + Name: "test-install", + Namespace: NAMESPACE, + UID: "11111111111", + Finalizers: []string{util.KubeVirtFinalizer}, + }, + Spec: v1.KubeVirtSpec{ + ImageTag: "1.1.1", + }, + Status: v1.KubeVirtStatus{ + TargetKubeVirtVersion: "2.2.2.2", + }, + } + + addKubeVirt(kv) + + shouldExpectKubeVirtUpdateFailureCondition(ConditionReasonUpdateNotImplementedError) + controller.Execute() + }, 15) + + // TODO this test case will be removed once updates are implemented + It("shouldn't allow updating kubevirt image registry until updates are implemented", func(done Done) { + defer close(done) + + kv := &v1.KubeVirt{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-install", + Namespace: NAMESPACE, + UID: "11111111111", + Finalizers: []string{util.KubeVirtFinalizer}, + }, + Spec: v1.KubeVirtSpec{ + ImageRegistry: "someregistry1", + }, + Status: v1.KubeVirtStatus{ + TargetKubeVirtRegistry: "someregistry2", + }, + } + + addKubeVirt(kv) + + shouldExpectKubeVirtUpdateFailureCondition(ConditionReasonUpdateNotImplementedError) + controller.Execute() + }, 15) + It("should fail if KubeVirt object already exists", func(done Done) { defer close(done) @@ -838,7 +888,7 @@ var _ = Describe("KubeVirt Operator", func() { addKubeVirt(kv1) addKubeVirt(kv2) - shouldExpectKubeVirtUpdateFailureCondition() + shouldExpectKubeVirtUpdateFailureCondition(ConditionReasonDeploymentFailedExisting) controller.execute(fmt.Sprintf("%s/%s", kv2.Namespace, kv2.Name)) From 0e53ca28afd8ea2bd5b02d27ed065d5db903c200 Mon Sep 17 00:00:00 2001 From: David Vossel Date: Fri, 1 Mar 2019 15:36:23 -0500 Subject: [PATCH 09/13] Operator function test with alternate image tag alias Signed-off-by: David Vossel --- cluster/ephemeral-provider-common.sh | 4 ++ hack/config.sh | 4 +- hack/functests.sh | 2 +- pkg/virt-operator/BUILD.bazel | 2 - .../install-strategy/BUILD.bazel | 1 + pkg/virt-operator/kubevirt.go | 6 +-- tests/operator_test.go | 38 +++++++++++++++++++ tests/utils.go | 2 + 8 files changed, 51 insertions(+), 8 deletions(-) diff --git a/cluster/ephemeral-provider-common.sh b/cluster/ephemeral-provider-common.sh index 96d5666f4552..34606e4a126b 100644 --- a/cluster/ephemeral-provider-common.sh +++ b/cluster/ephemeral-provider-common.sh @@ -17,6 +17,7 @@ function prepare_config() { cat >hack/config-provider-$KUBEVIRT_PROVIDER.sh < Date: Mon, 4 Mar 2019 16:48:31 -0500 Subject: [PATCH 10/13] TTY is needed for docker pull and docker tag to synchronize with the build 'docker pull' behaves differently without a tty. 
Signed-off-by: David Vossel --- cluster/ephemeral-provider-common.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cluster/ephemeral-provider-common.sh b/cluster/ephemeral-provider-common.sh index 34606e4a126b..96083d64a9f8 100644 --- a/cluster/ephemeral-provider-common.sh +++ b/cluster/ephemeral-provider-common.sh @@ -2,7 +2,9 @@ set -e -_cli="docker run --privileged --net=host --rm ${USE_TTY} -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:4a4565eb4487be95f464cb942590bbaa980a5b56acb32d955f7a9f81f4ba843c" +_cli_container="kubevirtci/gocli@sha256:4a4565eb4487be95f464cb942590bbaa980a5b56acb32d955f7a9f81f4ba843c" +_cli_with_tty="docker run --privileged --net=host --rm -t -v /var/run/docker.sock:/var/run/docker.sock ${_cli_container}" +_cli="docker run --privileged --net=host --rm ${USE_TTY} -v /var/run/docker.sock:/var/run/docker.sock ${_cli_container}" function _main_ip() { echo 127.0.0.1 @@ -55,9 +57,9 @@ function build() { container_alt_alias="${container_alt_alias} ${manifest_docker_prefix}/${name}:${docker_tag} ${manifest_docker_prefix}/${name}:${docker_tag_alt}" done for i in $(seq 1 ${KUBEVIRT_NUM_NODES}); do - ${_cli} ssh --prefix $provider_prefix "node$(printf "%02d" ${i})" "echo \"${container}\" | xargs \-\-max-args=1 sudo docker pull" - ${_cli} ssh --prefix $provider_prefix "node$(printf "%02d" ${i})" "echo \"${container_alias}\" | xargs \-\-max-args=2 sudo docker tag" - ${_cli} ssh --prefix $provider_prefix "node$(printf "%02d" ${i})" "echo \"${container_alt_alias}\" | xargs \-\-max-args=2 sudo docker tag" + ${_cli_with_tty} ssh --prefix $provider_prefix "node$(printf "%02d" ${i})" "echo \"${container}\" | xargs \-\-max-args=1 sudo docker pull" + ${_cli_with_tty} ssh --prefix $provider_prefix "node$(printf "%02d" ${i})" "echo \"${container_alias}\" | xargs \-\-max-args=2 sudo docker tag" + ${_cli_with_tty} ssh --prefix $provider_prefix "node$(printf "%02d" ${i})" "echo \"${container_alt_alias}\" | xargs \-\-max-args=2 sudo docker tag" done } From 71f986d82119a8ef7c2015855d4b947261008e5a Mon Sep 17 00:00:00 2001 From: David Vossel Date: Wed, 6 Mar 2019 15:07:14 -0500 Subject: [PATCH 11/13] Patch scc instead of using Update Signed-off-by: David Vossel --- .../install-strategy/BUILD.bazel | 1 + .../install-strategy/strategy.go | 24 ++++++++++---- pkg/virt-operator/kubevirt_test.go | 32 +++++++++++-------- 3 files changed, 37 insertions(+), 20 deletions(-) diff --git a/pkg/virt-operator/install-strategy/BUILD.bazel b/pkg/virt-operator/install-strategy/BUILD.bazel index 063000cfe82e..e1cb18314450 100644 --- a/pkg/virt-operator/install-strategy/BUILD.bazel +++ b/pkg/virt-operator/install-strategy/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) diff --git a/pkg/virt-operator/install-strategy/strategy.go b/pkg/virt-operator/install-strategy/strategy.go index 052f9bb95527..7e042a8e040e 100644 --- a/pkg/virt-operator/install-strategy/strategy.go +++ b/pkg/virt-operator/install-strategy/strategy.go @@ -22,6 +22,7 @@ package installstrategy import ( "bufio" "bytes" + "encoding/json" "fmt" "strings" @@ -34,6 +35,7 @@ import ( extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "kubevirt.io/kubevirt/pkg/api/v1" "kubevirt.io/kubevirt/pkg/controller" @@ -658,10 +660,15 @@ func DeleteAll(kv *v1.KubeVirt, } if modified { - privSccCopy.Users = users - _, err = scc.SecurityContextConstraints().Update(privSccCopy) + userBytes, err := json.Marshal(users) if err != nil { - return fmt.Errorf("unable to update scc: %v", err) + return err + } + + data := []byte(fmt.Sprintf(`{"users": %s}`, userBytes)) + _, err = scc.SecurityContextConstraints().Patch(sccPriv.TargetScc, types.StrategicMergePatchType, data) + if err != nil { + return fmt.Errorf("unable to patch scc: %v", err) } } } @@ -863,10 +870,15 @@ func CreateAll(kv *v1.KubeVirt, } if modified { - privSccCopy.Users = users - _, err = scc.SecurityContextConstraints().Update(privSccCopy) + userBytes, err := json.Marshal(users) + if err != nil { + return objectsAdded, err + } + + data := []byte(fmt.Sprintf(`{"users": %s}`, userBytes)) + _, err = scc.SecurityContextConstraints().Patch(sccPriv.TargetScc, types.StrategicMergePatchType, data) if err != nil { - return objectsAdded, fmt.Errorf("unable to update scc: %v", err) + return objectsAdded, fmt.Errorf("unable to patch scc: %v", err) } } } diff --git a/pkg/virt-operator/kubevirt_test.go b/pkg/virt-operator/kubevirt_test.go index b8ced720c7c5..cc320752ee86 100644 --- a/pkg/virt-operator/kubevirt_test.go +++ b/pkg/virt-operator/kubevirt_test.go @@ -20,6 +20,7 @@ package virt_operator import ( + "encoding/json" "fmt" "os" "time" @@ -577,10 +578,15 @@ var _ = Describe("KubeVirt Operator", func() { deleteResource(delete.GetResource().Resource, key) return true, nil, nil } - expectUsers := func(sccObj runtime.Object, count int) { - scc, ok := sccObj.(*secv1.SecurityContextConstraints) - ExpectWithOffset(2, ok).To(BeTrue()) - ExpectWithOffset(2, len(scc.Users)).To(Equal(count)) + expectUsers := func(userBytes []byte, count int) { + + type _users struct { + Users []string `json:"users"` + } + users := &_users{} + + json.Unmarshal(userBytes, users) + ExpectWithOffset(2, len(users.Users)).To(Equal(count)) } shouldExpectInstallStrategyDeletion := func() { @@ -605,11 +611,10 @@ var _ = Describe("KubeVirt Operator", func() { kubeClient.Fake.PrependReactor("delete", "roles", genericDeleteFunc) kubeClient.Fake.PrependReactor("delete", "rolebindings", genericDeleteFunc) - secClient.Fake.PrependReactor("update", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) { - update, _ := action.(testing.UpdateAction) - updatedObj := update.GetObject() - expectUsers(updatedObj, 1) - return true, updatedObj, nil + secClient.Fake.PrependReactor("patch", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) { + patch, _ := action.(testing.PatchAction) + expectUsers(patch.GetPatch(), 1) + return true, nil, nil }) extClient.Fake.PrependReactor("delete", "customresourcedefinitions", genericDeleteFunc) @@ -633,11 +638,10 @@ var _ = Describe("KubeVirt Operator", func() { kubeClient.Fake.PrependReactor("create", "roles", genericCreateFunc) kubeClient.Fake.PrependReactor("create", "rolebindings", genericCreateFunc) - secClient.Fake.PrependReactor("update", "securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) { - update, _ := action.(testing.UpdateAction) - updatedObj := update.GetObject() - expectUsers(updatedObj, 4) - return true, updatedObj, nil + secClient.Fake.PrependReactor("patch", 
"securitycontextconstraints", func(action testing.Action) (handled bool, obj runtime.Object, err error) { + patch, _ := action.(testing.PatchAction) + expectUsers(patch.GetPatch(), 4) + return true, nil, nil }) extClient.Fake.PrependReactor("create", "customresourcedefinitions", genericCreateFunc) From f268858b76c38e88d92a12a29414773b8d845ceb Mon Sep 17 00:00:00 2001 From: David Vossel Date: Wed, 6 Mar 2019 15:11:35 -0500 Subject: [PATCH 12/13] Rename Scc vars SCC to be consistent Signed-off-by: David Vossel --- .../install-strategy/strategy.go | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/virt-operator/install-strategy/strategy.go b/pkg/virt-operator/install-strategy/strategy.go index 7e042a8e040e..690bf5dbf3a5 100644 --- a/pkg/virt-operator/install-strategy/strategy.go +++ b/pkg/virt-operator/install-strategy/strategy.go @@ -48,16 +48,16 @@ import ( marshalutil "kubevirt.io/kubevirt/tools/util" ) -const customSccPrivilegedAccountsType = "KubevirtCustomSccRule" +const customSCCPrivilegedAccountsType = "KubevirtCustomSCCRule" -type customSccPrivilegedAccounts struct { +type customSCCPrivilegedAccounts struct { // this isn't a real k8s object. We use the meta type // because it gives a consistent way to separate k8s // objects from our custom actions metav1.TypeMeta `json:",inline"` // this is the target scc we're adding service accounts to - TargetScc string `json:"TargetScc"` + TargetSCC string `json:"TargetSCC"` // these are the service accounts being added to the scc ServiceAccounts []string `json:"serviceAccounts"` @@ -78,7 +78,7 @@ type InstallStrategy struct { deployments []*appsv1.Deployment daemonSets []*appsv1.DaemonSet - customSccPrivileges []*customSccPrivilegedAccounts + customSCCPrivileges []*customSCCPrivilegedAccounts } func NewInstallStrategyConfigMap(namespace string, imageTag string, imageRegistry string) (*corev1.ConfigMap, error) { @@ -177,7 +177,7 @@ func dumpInstallStrategyToBytes(strategy *InstallStrategy) []byte { for _, entry := range strategy.daemonSets { marshalutil.MarshallObject(entry, writer) } - for _, entry := range strategy.customSccPrivileges { + for _, entry := range strategy.customSCCPrivileges { marshalutil.MarshallObject(entry, writer) } writer.Flush() @@ -254,11 +254,11 @@ func GenerateCurrentInstallStrategy(namespace string, prefix := "system:serviceaccount" typeMeta := metav1.TypeMeta{ - Kind: customSccPrivilegedAccountsType, + Kind: customSCCPrivilegedAccountsType, } - strategy.customSccPrivileges = append(strategy.customSccPrivileges, &customSccPrivilegedAccounts{ + strategy.customSCCPrivileges = append(strategy.customSCCPrivileges, &customSCCPrivilegedAccounts{ TypeMeta: typeMeta, - TargetScc: "privileged", + TargetSCC: "privileged", ServiceAccounts: []string{ fmt.Sprintf("%s:%s:%s", prefix, namespace, "kubevirt-handler"), fmt.Sprintf("%s:%s:%s", prefix, namespace, "kubevirt-apiserver"), @@ -395,12 +395,12 @@ func loadInstallStrategyFromBytes(data string) (*InstallStrategy, error) { return nil, err } strategy.crds = append(strategy.crds, crd) - case customSccPrivilegedAccountsType: - priv := &customSccPrivilegedAccounts{} + case customSCCPrivilegedAccountsType: + priv := &customSCCPrivilegedAccounts{} if err := yaml.Unmarshal([]byte(entry), &priv); err != nil { return nil, err } - strategy.customSccPrivileges = append(strategy.customSccPrivileges, priv) + strategy.customSCCPrivileges = append(strategy.customSCCPrivileges, priv) default: return nil, fmt.Errorf("UNKNOWN TYPE %s detected", obj.Kind) @@ 
-637,22 +637,22 @@ func DeleteAll(kv *v1.KubeVirt, } scc := clientset.SecClient() - for _, sccPriv := range strategy.customSccPrivileges { - privSccObj, exists, err := stores.SCCCache.GetByKey(sccPriv.TargetScc) + for _, sccPriv := range strategy.customSCCPrivileges { + privSCCObj, exists, err := stores.SCCCache.GetByKey(sccPriv.TargetSCC) if !exists { return nil } else if err != nil { return err } - privScc, ok := privSccObj.(*secv1.SecurityContextConstraints) + privSCC, ok := privSCCObj.(*secv1.SecurityContextConstraints) if !ok { - return fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSccObj) + return fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSCCObj) } - privSccCopy := privScc.DeepCopy() + privSCCCopy := privSCC.DeepCopy() modified := false - users := privSccCopy.Users + users := privSCCCopy.Users for _, acc := range sccPriv.ServiceAccounts { removed := false users, removed = remove(users, acc) @@ -666,7 +666,7 @@ func DeleteAll(kv *v1.KubeVirt, } data := []byte(fmt.Sprintf(`{"users": %s}`, userBytes)) - _, err = scc.SecurityContextConstraints().Patch(sccPriv.TargetScc, types.StrategicMergePatchType, data) + _, err = scc.SecurityContextConstraints().Patch(sccPriv.TargetSCC, types.StrategicMergePatchType, data) if err != nil { return fmt.Errorf("unable to patch scc: %v", err) } @@ -846,22 +846,22 @@ func CreateAll(kv *v1.KubeVirt, } // Add service accounts to SCC - for _, sccPriv := range strategy.customSccPrivileges { - privSccObj, exists, err := stores.SCCCache.GetByKey(sccPriv.TargetScc) + for _, sccPriv := range strategy.customSCCPrivileges { + privSCCObj, exists, err := stores.SCCCache.GetByKey(sccPriv.TargetSCC) if !exists { return objectsAdded, nil } else if err != nil { return objectsAdded, err } - privScc, ok := privSccObj.(*secv1.SecurityContextConstraints) + privSCC, ok := privSCCObj.(*secv1.SecurityContextConstraints) if !ok { - return objectsAdded, fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSccObj) + return objectsAdded, fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", privSCCObj) } - privSccCopy := privScc.DeepCopy() + privSCCCopy := privSCC.DeepCopy() modified := false - users := privSccCopy.Users + users := privSCCCopy.Users for _, acc := range sccPriv.ServiceAccounts { if !contains(users, acc) { users = append(users, acc) @@ -876,7 +876,7 @@ func CreateAll(kv *v1.KubeVirt, } data := []byte(fmt.Sprintf(`{"users": %s}`, userBytes)) - _, err = scc.SecurityContextConstraints().Patch(sccPriv.TargetScc, types.StrategicMergePatchType, data) + _, err = scc.SecurityContextConstraints().Patch(sccPriv.TargetSCC, types.StrategicMergePatchType, data) if err != nil { return objectsAdded, fmt.Errorf("unable to patch scc: %v", err) } From 584568c8bc074982a2287c26d364acb063b7201e Mon Sep 17 00:00:00 2001 From: David Vossel Date: Wed, 6 Mar 2019 17:19:19 -0500 Subject: [PATCH 13/13] Make operator custom image functional test work with crio providers crio tooling in our prioviders doesn't support 'docker tag' so we have to push the same image to our local registry with the alternate tag Signed-off-by: David Vossel --- Makefile | 2 +- cluster/ephemeral-provider-common.sh | 9 +++------ hack/bazel-push-images.sh | 9 +++++++++ hack/config-default.sh | 1 + pkg/virt-operator/kubevirt.go | 2 +- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 3ae61acacee2..671bc8829f34 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ bazel-build: //cmd/..." 
bazel-push-images: - hack/dockerized "DOCKER_PREFIX=${DOCKER_PREFIX} DOCKER_TAG=${DOCKER_TAG} ./hack/bazel-push-images.sh" + hack/dockerized "DOCKER_PREFIX=${DOCKER_PREFIX} DOCKER_TAG=${DOCKER_TAG} DOCKER_TAG_ALT=${DOCKER_TAG_ALT} ./hack/bazel-push-images.sh" bazel-tests: hack/dockerized "bazel test --test_output=errors -- //pkg/... " diff --git a/cluster/ephemeral-provider-common.sh b/cluster/ephemeral-provider-common.sh index 96083d64a9f8..55a59bd2a5ba 100644 --- a/cluster/ephemeral-provider-common.sh +++ b/cluster/ephemeral-provider-common.sh @@ -19,7 +19,7 @@ function prepare_config() { cat >hack/config-provider-$KUBEVIRT_PROVIDER.sh <