From 8845905697d534e0b2da8b7590dbd7cec28e0c7c Mon Sep 17 00:00:00 2001
From: Lukianov Artyom
Date: Tue, 7 Nov 2017 16:55:39 +0200
Subject: [PATCH] Move our manifests to kube-system namespace

Signed-off-by: Lukianov Artyom
---
 automation/test.sh                          | 21 +++++++------------
 cluster/deploy.sh                           |  1 +
 cluster/vagrant/setup_kubernetes_common.sh  |  2 +-
 manifests/haproxy.yaml.in                   |  2 ++
 manifests/iscsi-auth-demo-target.yaml.in    |  3 +++
 manifests/iscsi-demo-target.yaml.in         |  8 ++++---
 manifests/libvirt.yaml.in                   |  1 +
 manifests/rbac.authorization.k8s.io.yaml.in |  9 ++++++--
 manifests/squid.yaml.in                     |  2 ++
 manifests/virt-api.yaml.in                  |  2 ++
 manifests/virt-controller.yaml.in           |  2 ++
 manifests/virt-handler.yaml.in              |  1 +
 manifests/virt-manifest.yaml.in             |  2 ++
 pkg/controller/expectations_test.go         |  2 +-
 .../leaderelectionconfig/config.go          |  2 +-
 tests/storage_test.go                       |  6 +++---
 tests/utils.go                              |  8 +++----
 17 files changed, 45 insertions(+), 29 deletions(-)

diff --git a/automation/test.sh b/automation/test.sh
index 536762cd0c89..c9b216a07f21 100644
--- a/automation/test.sh
+++ b/automation/test.sh
@@ -113,34 +113,27 @@ done
 cluster/sync.sh
 
 # Wait until kubevirt pods are running
-while [ -n "$(kubectl get pods --no-headers | grep -v Running)" ]; do
+while [ -n "$(kubectl get pods -n kube-system --no-headers | grep -v Running)" ]; do
     echo "Waiting for kubevirt pods to enter the Running state ..."
-    kubectl get pods --no-headers | >&2 grep -v Running || true
+    kubectl get pods -n kube-system --no-headers | >&2 grep -v Running || true
     sleep 10
 done
 
 # Make sure all containers except virt-controller are ready
-while [ -n "$(kubectl get pods -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '!/virt-controller/ && /false/')" ]; do
+while [ -n "$(kubectl get pods -n kube-system -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '!/virt-controller/ && /false/')" ]; do
     echo "Waiting for KubeVirt containers to become ready ..."
-    kubectl get pods -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '!/virt-controller/ && /false/' || true
+    kubectl get pods -n kube-system -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '!/virt-controller/ && /false/' || true
     sleep 10
 done
 
 # Make sure that at least one virt-controller container is ready
-while [ "$(kubectl get pods -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '/virt-controller/ && /true/' | wc -l)" -lt "1" ]; do
+while [ "$(kubectl get pods -n kube-system -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '/virt-controller/ && /true/' | wc -l)" -lt "1" ]; do
     echo "Waiting for KubeVirt virt-controller container to become ready ..."
-    kubectl get pods -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '/virt-controller/ && /true/' | wc -l
+    kubectl get pods -n kube-system -o'custom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers | awk '/virt-controller/ && /true/' | wc -l
     sleep 10
 done
 
-# Wait until all pods are running
-while [ -n "$(kubectl get pods --no-headers --all-namespaces | grep -v Running)" ]; do
-    echo "Waiting for pods in all namespaces to enter the Running state ..."
-    kubectl get pods --no-headers --all-namespaces | >&2 grep -v Running || true
-    sleep 5
-done
-
-kubectl get pods --all-namespaces
+kubectl get pods -n kube-system
 kubectl version
 
 # Disable proxy configuration since it causes test issues
diff --git a/cluster/deploy.sh b/cluster/deploy.sh
index b637e4227c51..0cb59eb99595 100755
--- a/cluster/deploy.sh
+++ b/cluster/deploy.sh
@@ -34,6 +34,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: external-$NAME
+  namespace: kube-system
 spec:
   externalIPs:
   - "$master_ip"
diff --git a/cluster/vagrant/setup_kubernetes_common.sh b/cluster/vagrant/setup_kubernetes_common.sh
index 373187469c98..35f48c735e4d 100755
--- a/cluster/vagrant/setup_kubernetes_common.sh
+++ b/cluster/vagrant/setup_kubernetes_common.sh
@@ -79,7 +79,7 @@ yum install -y \
 # Latest docker on CentOS uses systemd for cgroup management
 cat << EOT >>/etc/systemd/system/kubelet.service.d/09-kubeadm.conf
 [Service]
-Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=systemd --fail-swap-on=false"
+Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=systemd --fail-swap-on=false --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice"
 EOT
 
 systemctl daemon-reload
diff --git a/manifests/haproxy.yaml.in b/manifests/haproxy.yaml.in
index af2b47c6b59c..31b4a59e5ecd 100644
--- a/manifests/haproxy.yaml.in
+++ b/manifests/haproxy.yaml.in
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: haproxy
+  namespace: kube-system
 spec:
   ports:
     - port: 8184
@@ -13,6 +14,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: haproxy
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/iscsi-auth-demo-target.yaml.in b/manifests/iscsi-auth-demo-target.yaml.in
index 8fddd35e63ef..285dcfd7d67c 100644
--- a/manifests/iscsi-auth-demo-target.yaml.in
+++ b/manifests/iscsi-auth-demo-target.yaml.in
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: iscsi-auth-demo-secret
+  namespace: kube-system
 type: "kubernetes.io/iscsi-chap"
 data:
   node.session.auth.username: ZGVtb3VzZXI=
@@ -11,6 +12,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: iscsi-auth-demo-target
+  namespace: kube-system
 spec:
   ports:
     - name: iscsi
@@ -23,6 +25,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: iscsi-auth-demo-target-tgtd
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/iscsi-demo-target.yaml.in b/manifests/iscsi-demo-target.yaml.in
index fd34a73d9eb7..cd506987cf67 100644
--- a/manifests/iscsi-demo-target.yaml.in
+++ b/manifests/iscsi-demo-target.yaml.in
@@ -59,7 +59,7 @@ spec:
   iscsi:
     iqn: iqn.2017-01.io.kubevirt:sn.42
     lun: 1
-    targetPortal: iscsi-demo-target.default.svc.cluster.local
+    targetPortal: iscsi-demo-target.kube-system.svc.cluster.local
 ---
 apiVersion: v1
 kind: PersistentVolume
@@ -75,7 +75,7 @@ spec:
   iscsi:
     iqn: iqn.2017-01.io.kubevirt:sn.42
     lun: 2
-    targetPortal: iscsi-demo-target.default.svc.cluster.local
+    targetPortal: iscsi-demo-target.kube-system.svc.cluster.local
 ---
 apiVersion: v1
 kind: PersistentVolume
@@ -91,12 +91,13 @@ spec:
   iscsi:
     iqn: iqn.2017-01.io.kubevirt:sn.42
     lun: 3
-    targetPortal: iscsi-demo-target.default.svc.cluster.local
+    targetPortal: iscsi-demo-target.kube-system.svc.cluster.local
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: iscsi-demo-target
+  namespace: kube-system
 spec:
   ports:
     - name: iscsi
@@ -109,6 +110,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: iscsi-demo-target-tgtd
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/libvirt.yaml.in b/manifests/libvirt.yaml.in
index d63cfa56e589..61c034cd528d 100644
--- a/manifests/libvirt.yaml.in
+++ b/manifests/libvirt.yaml.in
@@ -2,6 +2,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: libvirt
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/rbac.authorization.k8s.io.yaml.in b/manifests/rbac.authorization.k8s.io.yaml.in
index 3ae69f9c7bcd..f44eb3671f66 100644
--- a/manifests/rbac.authorization.k8s.io.yaml.in
+++ b/manifests/rbac.authorization.k8s.io.yaml.in
@@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
   name: kubevirt-infra
+  namespace: kube-system
   labels:
     name: kubevirt
 rules:
@@ -44,6 +45,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kubevirt-infra
+  namespace: kube-system
   labels:
     name: kubevirt
 ---
@@ -51,6 +53,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kubevirt-admin
+  namespace: kube-system
   labels:
     name: kubevirt-admin
 ---
@@ -58,6 +61,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   name: kubevirt-infra
+  namespace: kube-system
   labels:
     name: kubevirt
 roleRef:
@@ -67,12 +71,13 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubevirt-infra
-  namespace: default
+  namespace: kube-system
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   name: kubevirt-infra-cluster-admin
+  namespace: kube-system
   labels:
     name: kubevirt
 roleRef:
@@ -82,4 +87,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubevirt-infra
-  namespace: default
+  namespace: kube-system
diff --git a/manifests/squid.yaml.in b/manifests/squid.yaml.in
index f93d4ba1babe..53b9cdc34464 100644
--- a/manifests/squid.yaml.in
+++ b/manifests/squid.yaml.in
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: spice-proxy
+  namespace: kube-system
 spec:
   ports:
     - port: 3128
@@ -13,6 +14,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: spice-proxy
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/virt-api.yaml.in b/manifests/virt-api.yaml.in
index 000e732d4134..3b0fa1baaf85 100644
--- a/manifests/virt-api.yaml.in
+++ b/manifests/virt-api.yaml.in
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: virt-api
+  namespace: kube-system
 spec:
   ports:
     - port: 8183
@@ -13,6 +14,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: virt-api
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/virt-controller.yaml.in b/manifests/virt-controller.yaml.in
index b29eced8702c..9aea3e7077c8 100644
--- a/manifests/virt-controller.yaml.in
+++ b/manifests/virt-controller.yaml.in
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: virt-controller
+  namespace: kube-system
 spec:
   ports:
     - port: 8182
@@ -13,6 +14,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: virt-controller
+  namespace: kube-system
 spec:
   replicas: 2
   template:
diff --git a/manifests/virt-handler.yaml.in b/manifests/virt-handler.yaml.in
index e817d4eae85f..4a70a504f050 100644
--- a/manifests/virt-handler.yaml.in
+++ b/manifests/virt-handler.yaml.in
@@ -2,6 +2,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: virt-handler
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/manifests/virt-manifest.yaml.in b/manifests/virt-manifest.yaml.in
index dfa4a5946abf..a1f63d4d0723 100644
--- a/manifests/virt-manifest.yaml.in
+++ b/manifests/virt-manifest.yaml.in
@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: virt-manifest
+  namespace: kube-system
 spec:
   ports:
     - port: 8186
@@ -13,6 +14,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: virt-manifest
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/pkg/controller/expectations_test.go b/pkg/controller/expectations_test.go
index cc84aa0f8c1c..69c45ffd5243 100644
--- a/pkg/controller/expectations_test.go
+++ b/pkg/controller/expectations_test.go
@@ -70,7 +70,7 @@ func newReplicationController(replicas int) *v1.ReplicationController {
         ObjectMeta: metav1.ObjectMeta{
             UID:             uuid.NewUUID(),
             Name:            "foobar",
-            Namespace:       metav1.NamespaceDefault,
+            Namespace:       metav1.NamespaceSystem,
             ResourceVersion: "18",
         },
         Spec: v1.ReplicationControllerSpec{
diff --git a/pkg/virt-controller/leaderelectionconfig/config.go b/pkg/virt-controller/leaderelectionconfig/config.go
index 129b1111dfc3..4197a58e163f 100644
--- a/pkg/virt-controller/leaderelectionconfig/config.go
+++ b/pkg/virt-controller/leaderelectionconfig/config.go
@@ -31,7 +31,7 @@ const (
     DefaultLeaseDuration = 15 * time.Second
     DefaultRenewDeadline = 10 * time.Second
     DefaultRetryPeriod   = 2 * time.Second
-    DefaultNamespace     = "default"
+    DefaultNamespace     = "kube-system"
     DefaultEndpointName  = "virt-controller"
 )
 
diff --git a/tests/storage_test.go b/tests/storage_test.go
index 73797c92656c..c00cdb2217bd 100644
--- a/tests/storage_test.go
+++ b/tests/storage_test.go
@@ -46,7 +46,7 @@ var _ = Describe("Storage", func() {
     })
 
     getTargetLogs := func(tailLines int64) string {
-        pods, err := virtClient.CoreV1().Pods(k8sv1.NamespaceDefault).List(metav1.ListOptions{LabelSelector: "app in (iscsi-demo-target)"})
+        pods, err := virtClient.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{LabelSelector: "app in (iscsi-demo-target)"})
         Expect(err).ToNot(HaveOccurred())
 
         //FIXME Sometimes pods hang in terminating state, select the pod which does not have a deletion timestamp
@@ -60,7 +60,7 @@ var _ = Describe("Storage", func() {
         Expect(podName).ToNot(BeEmpty())
 
         logsRaw, err := virtClient.CoreV1().
-            Pods("default").
+            Pods(metav1.NamespaceSystem).
             GetLogs(podName, &k8sv1.PodLogOptions{TailLines: &tailLines}).
             DoRaw()
 
@@ -99,7 +99,7 @@ var _ = Describe("Storage", func() {
 
     Context("Given a fresh iSCSI target", func() {
         It("should be available and ready", func() {
-            logs := getTargetLogs(70)
+            logs := getTargetLogs(75)
             Expect(logs).To(ContainSubstring("Target 1: iqn.2017-01.io.kubevirt:sn.42"))
             Expect(logs).To(ContainSubstring("Driver: iscsi"))
             Expect(logs).To(ContainSubstring("State: ready"))
diff --git a/tests/utils.go b/tests/utils.go
index 7f99e4b2cae7..580ddc93fe89 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -348,11 +348,11 @@ func newPV(os string, lun int32, withAuth bool) *k8sv1.PersistentVolume {
     PanicOnError(err)
 
     name := fmt.Sprintf("iscsi-disk-%s-for-tests", os)
-    target := "iscsi-demo-target.default.svc.cluster.local"
+    target := "iscsi-demo-target.kube-system.svc.cluster.local"
     label := os
     if withAuth {
         name = fmt.Sprintf("iscsi-auth-disk-%s-for-tests", os)
-        target = "iscsi-auth-demo-target.default.svc.cluster.local"
+        target = "iscsi-auth-demo-target.kube-system.svc.cluster.local"
         label = fmt.Sprintf("%s-auth", os)
     }
 
@@ -602,7 +602,7 @@ func NewRandomVMWithDirectLun(lun int, withAuth bool) *v1.VirtualMachine {
             },
             Source: v1.DiskSource{
                 Host: &v1.DiskSourceHost{
-                    Name: "iscsi-demo-target.default",
+                    Name: "iscsi-demo-target.kube-system",
                     Port: "3260",
                 },
                 Protocol: "iscsi",
@@ -618,7 +618,7 @@ func NewRandomVMWithDirectLun(lun int, withAuth bool) *v1.VirtualMachine {
                 Usage: "iscsi-demo-secret",
             },
         }
-        vm.Spec.Domain.Devices.Disks[0].Source.Host.Name = "iscsi-auth-demo-target.default"
+        vm.Spec.Domain.Devices.Disks[0].Source.Host.Name = "iscsi-auth-demo-target.kube-system"
     }
     return vm
 }
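
---

Testing note (a reviewer sketch, not part of the applied diff): everything
KubeVirt creates is now pinned to kube-system, so leftovers in default point
at a stale deployment or a manifest that missed its namespace field. A minimal
smoke check after cluster/sync.sh, assuming kubectl already targets the
vagrant cluster and that pod names keep their component prefixes:

    # All KubeVirt pods should land in kube-system ...
    kubectl get pods -n kube-system

    # ... and none should be left behind in default
    kubectl get pods -n default --no-headers \
        | grep -E 'virt-|libvirt|haproxy|spice-proxy|iscsi' \
        && echo "stale KubeVirt objects still in default"

    # The ClusterRoleBinding subjects must reference the new namespace;
    # this should print kube-system
    kubectl get clusterrolebinding kubevirt-infra \
        -o jsonpath='{.subjects[0].namespace}'

    # With DefaultNamespace moved, leader election is expected to use an
    # Endpoints object named virt-controller in kube-system
    kubectl get endpoints -n kube-system virt-controller

If the jsonpath call still prints default, the kubevirt-infra service account
in kube-system is not bound to its ClusterRole and virt-controller will fail
authorization after the move.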