
Update to Kubernetes v1.18
Signed-off-by: Mikkel Oscar Lyderik Larsen <[email protected]>
mikkeloscar committed Jun 2, 2020
1 parent e637ec5 commit 4499685
Showing 23 changed files with 339 additions and 137 deletions.
3 changes: 2 additions & 1 deletion cluster/config-defaults.yaml
@@ -194,7 +194,7 @@ teapot_admission_controller_validate_pod_template_resources: "true"
 {{end}}
 
 {{if eq .Environment "e2e"}}
-teapot_admission_controller_ignore_namespaces: "^kube-system|((downward-api|kubectl|projected|statefulset|pod-network|scope-selectors|resourcequota)-.*)$"
+teapot_admission_controller_ignore_namespaces: "^kube-system|((downward-api|kubectl|projected|statefulset|pod-network|scope-selectors|resourcequota|limitrange)-.*)$"
 teapot_admission_controller_crd_ensure_no_resources_on_delete: "false"
 {{else}}
 teapot_admission_controller_ignore_namespaces: "^kube-system$"
@@ -217,6 +217,7 @@ coredns_log_svc_names: "true"
 coredns_max_upstream_concurrency: 0 # 0 means there are no concurrency limits
 
 kuberuntu_image_v1_17: {{ amiID "zalando-ubuntu-kubernetes-production-v1.17.6-master-109" "861068367966" }}
+kuberuntu_image_v1_18: {{ amiID "zalando-ubuntu-kubernetes-production-v1.18.3-master-109" "861068367966" }}
 
 # Feature toggle to allow gradual decommissioning of ingress-template-controller
 enable_ingress_template_controller: "false"
2 changes: 1 addition & 1 deletion cluster/node-pools/master-default/stack.yaml
@@ -4,7 +4,7 @@ Description: Kubernetes default master node pool
 Mappings:
   Images:
     eu-central-1:
-      MachineImage: '{{ .Cluster.ConfigItems.kuberuntu_image_v1_17 }}'
+      MachineImage: '{{ .Cluster.ConfigItems.kuberuntu_image_v1_18 }}'
 
 Resources:
   AutoScalingGroup:
8 changes: 3 additions & 5 deletions cluster/node-pools/master-default/userdata.yaml
@@ -43,8 +43,6 @@ write_files:
 {{- end }}
 featureGates:
   BoundServiceAccountTokenVolume: {{ .Cluster.ConfigItems.rotate_service_account_tokens }}
-  CSIDriverRegistry: {{ .Cluster.ConfigItems.enable_csi_migration }}
-  CSIBlockVolume: {{ .Cluster.ConfigItems.enable_csi_migration }}
   CSIMigration: {{ .Cluster.ConfigItems.enable_csi_migration }}
 podPidsLimit: {{ .NodePool.ConfigItems.pod_max_pids }}
 maxPods: {{ nodeCIDRMaxPods (parseInt64 .Cluster.ConfigItems.node_cidr_mask_size) 8 }}
@@ -144,7 +142,7 @@ write_files:
 - --authorization-mode=Webhook,RBAC
 - --authorization-webhook-config-file=/etc/kubernetes/config/authz.yaml
 - --admission-control-config-file=/etc/kubernetes/config/image-policy-webhook.yaml
-- --feature-gates=TTLAfterFinished=true,BoundServiceAccountTokenVolume={{ .Cluster.ConfigItems.rotate_service_account_tokens }},EndpointSlice={{ .Cluster.ConfigItems.enable_endpointslice }},HPAScaleToZero={{ .Cluster.ConfigItems.enable_hpa_scale_to_zero }},CSIDriverRegistry={{ .Cluster.ConfigItems.enable_csi_migration }},CSIBlockVolume={{ .Cluster.ConfigItems.enable_csi_migration }},VolumeSnapshotDataSource={{ .Cluster.ConfigItems.enable_csi_migration }},CSIMigration={{ .Cluster.ConfigItems.enable_csi_migration }}
+- --feature-gates=TTLAfterFinished=true,BoundServiceAccountTokenVolume={{ .Cluster.ConfigItems.rotate_service_account_tokens }},EndpointSlice={{ .Cluster.ConfigItems.enable_endpointslice }},HPAScaleToZero={{ .Cluster.ConfigItems.enable_hpa_scale_to_zero }},VolumeSnapshotDataSource={{ .Cluster.ConfigItems.enable_csi_migration }},CSIMigration={{ .Cluster.ConfigItems.enable_csi_migration }}
 - --anonymous-auth=false
 - --service-account-key-file=/etc/kubernetes/ssl/service-account-public-key.pem
 {{- if eq .Cluster.ConfigItems.rotate_service_account_tokens "true" }}
@@ -407,7 +405,7 @@ write_files:
 - name: BUSINESS_PARTNERS
   value: {{ .Cluster.ConfigItems.apiserver_business_partner_ids }}
 {{ end }}
-- image: registry.opensource.zalan.do/teapot/image-policy-webhook:0.5.3
+- image: registry.opensource.zalan.do/teapot/image-policy-webhook:v0.5.4
   name: image-policy-webhook
   args:
   - --policy={{ .Cluster.ConfigItems.image_policy }}
@@ -567,7 +565,7 @@ write_files:
 - --root-ca-file=/etc/kubernetes/ssl/ca.pem
 - --cloud-provider=aws
 - --cloud-config=/etc/kubernetes/cloud-config.ini
-- --feature-gates=TTLAfterFinished=true,BoundServiceAccountTokenVolume={{ .Cluster.ConfigItems.rotate_service_account_tokens }},CSIMigration={{ .Cluster.ConfigItems.enable_csi_migration }},CSIDriverRegistry={{ .Cluster.ConfigItems.enable_csi_migration }},CSIBlockVolume={{ .Cluster.ConfigItems.enable_csi_migration }}
+- --feature-gates=TTLAfterFinished=true,BoundServiceAccountTokenVolume={{ .Cluster.ConfigItems.rotate_service_account_tokens }},CSIMigration={{ .Cluster.ConfigItems.enable_csi_migration }}
 - --horizontal-pod-autoscaler-use-rest-clients=true
 - --use-service-account-credentials=true
 - --configure-cloud-routes=false
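Note on the feature-gate cleanup above: CSIDriverRegistry and CSIBlockVolume graduated to GA in Kubernetes 1.18, and GA gates are locked on, so they can no longer be templated from enable_csi_migration; only the still-togglable CSIMigration gate (and, on the API server, VolumeSnapshotDataSource) remains.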
2 changes: 1 addition & 1 deletion cluster/node-pools/worker-default/stack.yaml
@@ -4,7 +4,7 @@ Description: Kubernetes default worker node pool
 Mappings:
   Images:
     eu-central-1:
-      MachineImage: '{{ .Cluster.ConfigItems.kuberuntu_image_v1_17 }}'
+      MachineImage: '{{ .Cluster.ConfigItems.kuberuntu_image_v1_18 }}'
 
 Resources:
   AutoScalingGroup:
2 changes: 0 additions & 2 deletions cluster/node-pools/worker-default/userdata.yaml
@@ -53,8 +53,6 @@ write_files:
 cpuCFSQuota: false
 featureGates:
   BoundServiceAccountTokenVolume: {{ .Cluster.ConfigItems.rotate_service_account_tokens }}
-  CSIDriverRegistry: {{ .Cluster.ConfigItems.enable_csi_migration }}
-  CSIBlockVolume: {{ .Cluster.ConfigItems.enable_csi_migration }}
   CSIMigration: {{ .Cluster.ConfigItems.enable_csi_migration }}
 podPidsLimit: {{ .NodePool.ConfigItems.pod_max_pids }}
 cpuManagerPolicy: {{ .NodePool.ConfigItems.cpu_manager_policy }}
2 changes: 1 addition & 1 deletion cluster/node-pools/worker-splitaz/stack.yaml
@@ -4,7 +4,7 @@ Description: Kubernetes default worker node pool
 Mappings:
   Images:
     eu-central-1:
-      MachineImage: '{{ .Cluster.ConfigItems.kuberuntu_image_v1_17 }}'
+      MachineImage: '{{ .Cluster.ConfigItems.kuberuntu_image_v1_18 }}'
 
 Resources:
 {{ with $data := . }}
2 changes: 1 addition & 1 deletion test/e2e/Makefile
@@ -2,7 +2,7 @@
 
 BINARY ?= kubernetes-on-aws-e2e
 VERSION ?= $(shell git describe --tags --always --dirty)
-KUBE_VERSION ?= v1.17.4
+KUBE_VERSION ?= v1.18.3
 IMAGE ?= registry-write.opensource.zalan.do/teapot/$(BINARY)
 TAG ?= $(VERSION)
 DOCKERFILE ?= Dockerfile
14 changes: 7 additions & 7 deletions test/e2e/admission_controller.go
@@ -18,6 +18,7 @@ this component is purposed to tests webhooks in the admission controller
 package e2e
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -31,7 +32,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	deploymentframework "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

@@ -61,18 +61,18 @@ var _ = framework.KubeDescribe("Admission controller tests", func() {
 		By("Creating deployment " + nameprefix + " in namespace " + ns)
 
 		deployment := createDeploymentWithDeploymentInfo(nameprefix+"-", ns, podname, replicas)
-		_, err := cs.AppsV1().Deployments(ns).Create(deployment)
+		_, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		labelSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 		Expect(err).NotTo(HaveOccurred())
-		err = deploymentframework.WaitForDeploymentWithCondition(cs, ns, deployment.Name, "MinimumReplicasAvailable", appsv1.DeploymentAvailable)
+		err = waitForDeploymentWithCondition(cs, ns, deployment.Name, "MinimumReplicasAvailable", appsv1.DeploymentAvailable)
 		Expect(err).NotTo(HaveOccurred())
 
 		// pods are not returned here
 		_, err = e2epod.WaitForPodsWithLabelRunningReady(cs, ns, labelSelector, int(replicas), 1*time.Minute)
 		Expect(err).NotTo(HaveOccurred())
 
-		pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labelSelector.String()})
+		pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector.String()})
 		Expect(err).NotTo(HaveOccurred())
 		Expect(len(pods.Items)).To(Equal(1))
 
@@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Admission controller tests", func() {
 		Expect(pod.Annotations).To(HaveKeyWithValue("zalando.org/cdp-pipeline-id", pipelineId))
 
 		// Check the injected node zone
-		node, err := cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
+		node, err := cs.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		nodeZone := node.Labels["topology.kubernetes.io/zone"]
 		Expect(pod.Annotations).To(HaveKeyWithValue("topology.kubernetes.io/zone", nodeZone))
@@ -122,7 +122,7 @@
 
 		By("Creating pod " + podName + " in namespace " + ns)
 		pod := createInvalidOwnerPod(ns, podName)
-		_, err := cs.CoreV1().Pods(ns).Create(pod)
+		_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		err = e2epod.WaitForPodSuccessInNamespaceSlow(cs, podName, ns)
@@ -133,7 +133,7 @@
 func fetchEnvarValues(client kubernetes.Interface, ns, pod string) (map[string]string, error) {
 	result := make(map[string]string)
 
-	bytes, err := client.CoreV1().Pods(ns).GetLogs(pod, &v1.PodLogOptions{}).DoRaw()
+	bytes, err := client.CoreV1().Pods(ns).GetLogs(pod, &v1.PodLogOptions{}).DoRaw(context.TODO())
 	if err != nil {
 		return nil, err
 	}
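Most of the Go churn in this and the following test files is mechanical fallout of the client-go v0.18 API: every clientset verb now takes a context.Context as its first argument plus an explicit options struct, and DeleteOptions is passed by value, so the old metav1.NewDeleteOptions(0) pointer helper becomes a plain metav1.DeleteOptions{} (which also drops the explicit zero grace period). A minimal sketch of the new calling convention — listAndClean is an illustrative name, not part of this commit:

package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAndClean lists the pods in a namespace and deletes them, using the
// client-go v0.18 convention: context first, options structs by value.
func listAndClean(cs kubernetes.Interface, ns string) error {
	pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		// v0.17 and earlier: Delete(p.Name, metav1.NewDeleteOptions(0))
		if err := cs.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}); err != nil {
			return err
		}
	}
	return nil
}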
14 changes: 7 additions & 7 deletions test/e2e/apiserver.go
@@ -18,6 +18,7 @@ this component is purposed to tests webhooks in the apiserver
 package e2e
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -29,7 +30,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	deploymentframework "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

@@ -52,19 +52,19 @@ var _ = framework.KubeDescribe("API Server webhook tests", func() {
 		By("Creating deployment " + nameprefix + " in namespace " + ns)
 
 		deployment := createImagePolicyWebhookTestDeployment(nameprefix+"-", ns, tag, podname, replicas)
-		_, err := cs.AppsV1().Deployments(ns).Create(deployment)
+		_, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
 		defer func() {
 			By(fmt.Sprintf("Delete a compliant deployment: %s", deployment.Name))
 			defer GinkgoRecover()
-			err := cs.AppsV1().Deployments(ns).Delete(deployment.Name, metav1.NewDeleteOptions(0))
+			err := cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		Expect(err).NotTo(HaveOccurred())
 		label := map[string]string{
 			"app": podname,
 		}
 		labelSelector := labels.SelectorFromSet(labels.Set(label))
-		err = deploymentframework.WaitForDeploymentWithCondition(cs, ns, deployment.Name, "MinimumReplicasAvailable", appsv1.DeploymentAvailable)
+		err = waitForDeploymentWithCondition(cs, ns, deployment.Name, "MinimumReplicasAvailable", appsv1.DeploymentAvailable)
 		Expect(err).NotTo(HaveOccurred())
 		_, err = e2epod.WaitForPodsWithLabelRunningReady(cs, ns, labelSelector, int(replicas), 1*time.Minute)
 		Expect(err).NotTo(HaveOccurred())
@@ -81,15 +81,15 @@ var _ = framework.KubeDescribe("API Server webhook tests", func() {
 		By("Creating deployment " + nameprefix + " in namespace " + ns)
 
 		deployment := createImagePolicyWebhookTestDeployment(nameprefix+"-", ns, tag, podname, replicas)
-		_, err := cs.AppsV1().Deployments(ns).Create(deployment)
+		_, err := cs.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		defer func() {
 			By(fmt.Sprintf("Delete a compliant deployment: %s", deployment.Name))
 			defer GinkgoRecover()
-			err := cs.AppsV1().Deployments(ns).Delete(deployment.Name, metav1.NewDeleteOptions(0))
+			err := cs.AppsV1().Deployments(ns).Delete(context.TODO(), deployment.Name, metav1.DeleteOptions{})
 			Expect(err).NotTo(HaveOccurred())
 		}()
-		err = deploymentframework.WaitForDeploymentWithCondition(cs, ns, deployment.Name, "FailedCreate", appsv1.DeploymentReplicaFailure)
+		err = waitForDeploymentWithCondition(cs, ns, deployment.Name, "FailedCreate", appsv1.DeploymentReplicaFailure)
 		Expect(err).NotTo(HaveOccurred())
 	})
 })
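Both files above also swap deploymentframework.WaitForDeploymentWithCondition for a local waitForDeploymentWithCondition, presumably because the upstream helper changed in the 1.18 test-framework reshuffle. Its definition is not in the rendered portion of this diff; a minimal sketch consistent with the call sites — the poll interval and timeout are assumptions — would be:

package e2e

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDeploymentWithCondition polls the deployment until it reports a
// condition of the given type whose Reason matches, or the timeout expires.
func waitForDeploymentWithCondition(cs kubernetes.Interface, ns, name, reason string, condType appsv1.DeploymentConditionType) error {
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		d, err := cs.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, c := range d.Status.Conditions {
			if c.Type == condType && c.Reason == reason {
				return true, nil
			}
		}
		return false, nil
	})
}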
7 changes: 4 additions & 3 deletions test/e2e/audit.go
@@ -1,6 +1,7 @@
 package e2e
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"time"
@@ -49,10 +50,10 @@ var _ = framework.KubeDescribe("Audit", func() {
 
 		f.PodClient().Update(pod.Name, updatePod)
 
-		_, err := f.PodClient().Patch(pod.Name, types.JSONPatchType, patch)
+		_, err := f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
 		framework.ExpectNoError(err, "failed to patch pod")
 
-		f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(pod.Name, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
 		expectEvents(f, []utils.AuditEvent{
 			{
Expand Down Expand Up @@ -111,7 +112,7 @@ func expectEvents(f *framework.Framework, expectedEvents []utils.AuditEvent) {
pollingTimeout := 5 * time.Minute
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
// Fetch the log stream.
stream, err := f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/logs/kube-audit.log").Stream()
stream, err := f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/logs/kube-audit.log").Stream(context.TODO())
if err != nil {
return false, err
}
13 changes: 7 additions & 6 deletions test/e2e/aws_iam.go
@@ -14,6 +14,7 @@ limitations under the License.
 package e2e
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -52,7 +53,7 @@ var _ = framework.KubeDescribe("AWS IAM Integration (kube-aws-iam-controller)",
 
 		By("Creating a awscli POD in namespace " + ns)
 		pod := createAWSIAMPod("aws-iam-", ns, E2ES3AWSIAMBucket())
-		_, err := cs.CoreV1().Pods(ns).Create(pod)
+		_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		// AWSIAMRole
@@ -61,17 +62,17 @@
 		defer func() {
 			By("deleting the AWSIAMRole")
 			defer GinkgoRecover()
-			err2 := zcs.ZalandoV1().AWSIAMRoles(ns).Delete(rs.Name, metav1.NewDeleteOptions(0))
+			err2 := zcs.ZalandoV1().AWSIAMRoles(ns).Delete(context.TODO(), rs.Name, metav1.DeleteOptions{})
 			Expect(err2).NotTo(HaveOccurred())
 		}()
-		_, err = zcs.ZalandoV1().AWSIAMRoles(ns).Create(rs)
+		_, err = zcs.ZalandoV1().AWSIAMRoles(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 
 		// wait for pod to access s3 and POD exit code 0
 		for {
-			p, err := cs.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
+			p, err := cs.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 			if err != nil {
 				Expect(fmt.Errorf("Could not get POD %s", pod.Name)).NotTo(HaveOccurred())
 				return
@@ -97,13 +98,13 @@ var _ = framework.KubeDescribe("AWS IAM Integration (kube-aws-iam-controller)",
 
 		By("Creating a awscli POD in namespace " + ns)
 		pod := createAWSCLIPod("no-aws-iam-", ns, E2ES3AWSIAMBucket())
-		_, err := cs.CoreV1().Pods(ns).Create(pod)
+		_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 
 		// wait for pod to access s3 and POD exit code 0
 		for {
-			p, err := cs.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
+			p, err := cs.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 			if err != nil {
 				Expect(fmt.Errorf("Could not get POD %s", pod.Name)).NotTo(HaveOccurred())
 				return
1 change: 1 addition & 0 deletions test/e2e/cluster_config.sh
@@ -57,6 +57,7 @@ clusters:
 vm_dirty_background_bytes: 67108864
 prometheus_tsdb_retention_size: enabled
 coredns_max_upsteam_concurrency: 30
+ebs_root_volume_size: "550" # required by the LimitRange e2e tests (they need 500Gi of ephemeral storage) https://github.com/kubernetes/kubernetes/blob/v1.18.3/test/e2e/scheduling/limit_range.go#L59
 criticality_level: 1
 environment: e2e
 id: ${CLUSTER_ID}
3 changes: 1 addition & 2 deletions test/e2e/e2e_test.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
-	"k8s.io/kubernetes/test/e2e/framework/viperconfig"
 	"k8s.io/kubernetes/test/e2e/generated"
 
 	// test sources
@@ -61,7 +60,7 @@ func TestMain(m *testing.M) {
 	// Register test flags, then parse flags.
 	handleFlags()
 
-	if err := viperconfig.ViperizeFlags(*viperConfig, "e2e", flag.CommandLine); err != nil {
+	if err := viperizeFlags(*viperConfig, "e2e", flag.CommandLine); err != nil {
 		fmt.Fprintln(os.Stderr, err)
 		os.Exit(1)
 	}
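The same pattern applies to viperconfig.ViperizeFlags, replaced by a local viperizeFlags whose definition is not shown here. A rough sketch of the idea — load an optional viper config file and apply its values to flags not set on the command line; every detail below is an assumption, not code from this commit:

package e2e

import (
	"flag"
	"fmt"

	"github.com/spf13/viper"
)

// viperizeFlags reads an optional config file (explicit path, or
// <defaultName>.yaml in the working directory) and applies its values to
// any flags the user did not set explicitly.
func viperizeFlags(configFile, defaultName string, fs *flag.FlagSet) error {
	v := viper.New()
	if configFile != "" {
		v.SetConfigFile(configFile)
	} else {
		v.SetConfigName(defaultName)
		v.AddConfigPath(".")
	}
	if err := v.ReadInConfig(); err != nil {
		if configFile != "" {
			return fmt.Errorf("reading config %q: %v", configFile, err)
		}
		return nil // a missing default config is not an error
	}
	explicit := map[string]bool{}
	fs.Visit(func(f *flag.Flag) { explicit[f.Name] = true })
	var applyErr error
	fs.VisitAll(func(f *flag.Flag) {
		if !explicit[f.Name] && v.IsSet(f.Name) {
			if err := fs.Set(f.Name, v.GetString(f.Name)); err != nil && applyErr == nil {
				applyErr = err
			}
		}
	})
	return applyErr
}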
9 changes: 5 additions & 4 deletions test/e2e/external_dns.go
@@ -14,6 +14,7 @@ limitations under the License.
 package e2e
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -50,26 +51,26 @@ var _ = framework.KubeDescribe("External DNS creation", func() {
 
 		By("Creating service " + serviceName + " in namespace " + ns)
 		defer func() {
-			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
+			err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
 			Expect(err).NotTo(HaveOccurred())
 		}()
 
 		hostName := fmt.Sprintf("%s-%d.%s", serviceName, time.Now().UTC().Unix(), E2EHostedZone())
 		service := createServiceTypeLoadbalancer(serviceName, hostName, labels, port)
 
-		_, err := cs.CoreV1().Services(ns).Create(service)
+		_, err := cs.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Submitting the pod to kubernetes")
 		pod := createNginxPod(nameprefix, ns, labels, port)
 		defer func() {
 			By("deleting the pod")
 			defer GinkgoRecover()
-			err2 := cs.CoreV1().Pods(ns).Delete(pod.Name, metav1.NewDeleteOptions(0))
+			err2 := cs.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
 			Expect(err2).NotTo(HaveOccurred())
 		}()
 
-		_, err = cs.CoreV1().Pods(ns).Create(pod)
+		_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
14 changes: 8 additions & 6 deletions test/e2e/go.mod
@@ -11,18 +11,20 @@ require (
 	github.com/jteeuwen/go-bindata v0.0.0-20151023091102-a0ff2567cfb7
 	github.com/karrick/godirwalk v1.8.0 // indirect
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
-	github.com/onsi/ginkgo v1.10.1
+	github.com/onsi/ginkgo v1.11.0
 	github.com/onsi/gomega v1.7.0
 	github.com/opencontainers/runtime-spec v1.0.1 // indirect
 	github.com/pkg/errors v0.8.1
 	github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 // indirect
 	github.com/spf13/viper v1.3.2
 	github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
-	github.com/zalando-incubator/kube-aws-iam-controller v0.1.1
+	github.com/zalando-incubator/kube-aws-iam-controller v0.1.2
 	gopkg.in/warnings.v0 v0.1.2 // indirect
-	k8s.io/api v0.0.0
-	k8s.io/apimachinery v0.0.0
+	k8s.io/api v0.18.3
+	k8s.io/apimachinery v0.18.3
 	k8s.io/apiserver v0.0.0
-	k8s.io/client-go v10.0.0+incompatible
-	k8s.io/kubernetes v1.17.4
+	k8s.io/client-go v0.18.3
+	k8s.io/kubernetes v1.18.3
 )
+
+replace k8s.io/kubernetes => ./e2e_modules/kubernetes
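The new replace directive points k8s.io/kubernetes at a local checkout under e2e_modules, so the suite builds the v1.18.3 e2e framework from source; k8s.io/kubernetes is not consumable as a plain Go module, which is also why k8s.io/apiserver keeps the v0.0.0 staging version above (it presumably has a matching replace in an unrendered part of the file).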
