diff --git a/Makefile b/Makefile
index 05c8ae4a47..9e808fc920 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ MAKEFILE_PATH = $(dir $(realpath -s $(firstword $(MAKEFILE_LIST))))
 
 # Image URL to use all building/pushing image targets
-IMG ?= public.ecr.aws/eks/aws-load-balancer-controller:v2.10.0
+IMG ?= public.ecr.aws/eks/aws-load-balancer-controller:v2.10.1
 # Image URL to use for builder stage in Docker build
 GOLANG_VERSION ?= $(shell cat .go-version)
 BUILD_IMAGE ?= public.ecr.aws/docker/library/golang:$(GOLANG_VERSION)
diff --git a/config/controller/kustomization.yaml b/config/controller/kustomization.yaml
index 0b017c773c..f8b3b88285 100644
--- a/config/controller/kustomization.yaml
+++ b/config/controller/kustomization.yaml
@@ -9,4 +9,4 @@ kind: Kustomization
 images:
 - name: controller
   newName: public.ecr.aws/eks/aws-load-balancer-controller
-  newTag: v2.10.0
+  newTag: v2.10.1
diff --git a/docs/deploy/installation.md b/docs/deploy/installation.md
index b0fbee642d..03e6084b7a 100644
--- a/docs/deploy/installation.md
+++ b/docs/deploy/installation.md
@@ -90,15 +90,15 @@ Example condition for cluster name resource tag:
 2. Download an IAM policy for the LBC using one of the following commands:
 If your cluster is in a US Gov Cloud region:
 ```
- curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/install/iam_policy_us-gov.json
+ curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/install/iam_policy_us-gov.json
 ```
 If your cluster is in a China region:
 ```
- curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/install/iam_policy_cn.json
+ curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/install/iam_policy_cn.json
 ```
 If your cluster is in any other region:
 ```
- curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/install/iam_policy.json
+ curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/install/iam_policy.json
 ```
 3. Create an IAM policy named `AWSLoadBalancerControllerIAMPolicy`. If you downloaded a different policy, replace `iam-policy` with the name of the policy that you downloaded.
@@ -124,7 +124,7 @@ Example condition for cluster name resource tag:
 ### Option B: Attach IAM policies to nodes
 If you're not setting up IAM roles for service accounts, apply the IAM policies from the following URL at a minimum. Please be aware of the possibility that the controller permissions may be assumed by other users in a pod after retrieving the node role credentials, so the best practice would be using IRSA instead of attaching IAM policy directly.
 ```
-curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/install/iam_policy.json
+curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/install/iam_policy.json
 ```
 The following IAM permissions subset is for those using `TargetGroupBinding` only and don't plan to use the LBC to manage security group rules:
@@ -209,7 +209,7 @@ We recommend using the Helm chart to install the controller. The chart supports
 ### Apply YAML
 1. Download the spec for the LBC.
 ```
- wget https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.10.0/v2_10_0_full.yaml
+ wget https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.10.1/v2_10_1_full.yaml
 ```
 2. Edit the saved yaml file, go to the Deployment spec, and set the controller `--cluster-name` arg value to your EKS cluster name
 ```
@@ -233,15 +233,15 @@ We recommend using the Helm chart to install the controller. The chart supports
 ```
 4. Apply the yaml file
 ```
- kubectl apply -f v2_10_0_full.yaml
+ kubectl apply -f v2_10_1_full.yaml
 ```
 5. Optionally download the default ingressclass and ingressclass params
 ```
- wget https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.10.0/v2_10_0_ingclass.yaml
+ wget https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.10.1/v2_10_1_ingclass.yaml
 ```
 6. Apply the ingressclass and params
 ```
- kubectl apply -f v2_10_0_ingclass.yaml
+ kubectl apply -f v2_10_1_ingclass.yaml
 ```
 
 ## Create Update Strategy
diff --git a/docs/examples/echo_server.md b/docs/examples/echo_server.md
index 1258730858..0ddb322f13 100644
--- a/docs/examples/echo_server.md
+++ b/docs/examples/echo_server.md
@@ -87,9 +87,9 @@ In this walkthrough, you'll
 1. Deploy all the echoserver resources (namespace, service, deployment)
 
 ```bash
- kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/examples/echoservice/echoserver-namespace.yaml &&\
- kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/examples/echoservice/echoserver-service.yaml &&\
- kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/examples/echoservice/echoserver-deployment.yaml
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/examples/echoservice/echoserver-namespace.yaml &&\
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/examples/echoservice/echoserver-service.yaml &&\
+ kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/examples/echoservice/echoserver-deployment.yaml
 ```
 
 1. List all the resources to ensure they were created.
@@ -113,7 +113,7 @@ In this walkthrough, you'll
 1. Download the echoserver ingress manifest locally.
 ```bash
- wget https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/examples/echoservice/echoserver-ingress.yaml
+ wget https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/examples/echoservice/echoserver-ingress.yaml
 ```
 
 1. Configure the subnets, either by add annotation to the ingress or add tags to subnets. This step is optional in lieu of auto-discovery.
@@ -300,7 +300,7 @@ You should get back a valid response.
 follow below steps if you want to use kube2iam to provide the AWS credentials
 
 1. configure the proper policy
- The policy to be used can be fetched from https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.0/docs/install/iam_policy.json
+ The policy to be used can be fetched from https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.10.1/docs/install/iam_policy.json
 
 1. configure the proper role and create the trust relationship
 You have to find which role is associated with your K8S nodes. Once you found take note of the full arn:
diff --git a/docs/guide/ingress/annotations.md b/docs/guide/ingress/annotations.md
index a47e534050..2dc2947ede 100644
--- a/docs/guide/ingress/annotations.md
+++ b/docs/guide/ingress/annotations.md
@@ -60,6 +60,7 @@ You can add annotations to kubernetes Ingress and Service objects to customize t
 | [alb.ingress.kubernetes.io/target-node-labels](#target-node-labels) | stringMap |N/A| Ingress,Service | N/A |
 | [alb.ingress.kubernetes.io/mutual-authentication](#mutual-authentication) | json |N/A| Ingress | Exclusive |
 | [alb.ingress.kubernetes.io/multi-cluster-target-group](#multi-cluster-target-group) | boolean |N/A| Ingress, Service | N/A |
+| [alb.ingress.kubernetes.io/listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap |N/A| Ingress |Merge|
 
 ## IngressGroup
 IngressGroup feature enables you to group multiple Ingress resources together.
@@ -903,6 +904,14 @@ Custom attributes to LoadBalancers and TargetGroups can be controlled with follo
 alb.ingress.kubernetes.io/multi-cluster-target-group: "true"
 ```
+- `alb.ingress.kubernetes.io/listener-attributes.${Protocol}-${Port}` specifies Listener Attributes which should be applied to listener.
+ + !!!example + - Server header enablement attribute + ``` + alb.ingress.kubernetes.io/listener-attributes.HTTP-80: routing.http.response.server.enabled=true + ``` + ## Resource Tags The AWS Load Balancer Controller automatically applies following tags to the AWS resources (ALB/TargetGroups/SecurityGroups/Listener/ListenerRule) it creates: diff --git a/docs/guide/service/annotations.md b/docs/guide/service/annotations.md index c7ab9bc07d..e9c494288c 100644 --- a/docs/guide/service/annotations.md +++ b/docs/guide/service/annotations.md @@ -13,48 +13,48 @@ These annotations are specific to the kubernetes [service resources reconciled](#lb-type) by the AWS Load Balancer Controller. Although the list was initially derived from the k8s in-tree `kube-controller-manager`, this documentation is not an accurate reference for the services reconciled by the in-tree controller. -| Name | Type | Default | Notes | -|--------------------------------------------------------------------------------------------------|-------------------------|---------------------------|--------------------------------------------------------| -| [service.beta.kubernetes.io/load-balancer-source-ranges](#lb-source-ranges) | stringList | | | -| [service.beta.kubernetes.io/aws-load-balancer-security-group-prefix-lists](#lb-security-group-prefix-lists) | stringList | | | -| [service.beta.kubernetes.io/aws-load-balancer-type](#lb-type) | string | | | -| [service.beta.kubernetes.io/aws-load-balancer-nlb-target-type](#nlb-target-type) | string | | default `instance` in case of LoadBalancerClass | -| [service.beta.kubernetes.io/aws-load-balancer-name](#load-balancer-name) | string | | | -| [service.beta.kubernetes.io/aws-load-balancer-internal](#lb-internal) | boolean | false | deprecated, in favor of [aws-load-balancer-scheme](#lb-scheme)| -| [service.beta.kubernetes.io/aws-load-balancer-scheme](#lb-scheme) | string | internal | | -| [service.beta.kubernetes.io/aws-load-balancer-proxy-protocol](#proxy-protocol-v2) | string | | Set to `"*"` to enable | -| [service.beta.kubernetes.io/aws-load-balancer-ip-address-type](#ip-address-type) | string | ipv4 | ipv4 \| dualstack | -| [service.beta.kubernetes.io/aws-load-balancer-access-log-enabled](#deprecated-attributes) | boolean | false | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes)| -| [service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name](#deprecated-attributes) | string | | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes)| -| [service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix](#deprecated-attributes)| string | | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes)| -| [service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled](#deprecated-attributes)| boolean | false | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes)| -| [service.beta.kubernetes.io/aws-load-balancer-ssl-cert](#ssl-cert) | stringList | | | -| [service.beta.kubernetes.io/aws-load-balancer-ssl-ports](#ssl-ports) | stringList | | | -| [service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy](#ssl-negotiation-policy) | string | ELBSecurityPolicy-2016-08 | | -| [service.beta.kubernetes.io/aws-load-balancer-backend-protocol](#backend-protocol) | string | | | -| [service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags](#additional-resource-tags) | stringMap | | | -| 
[service.beta.kubernetes.io/aws-load-balancer-healthcheck-protocol](#healthcheck-protocol) | string | TCP | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-port ](#healthcheck-port) | integer \| traffic-port | traffic-port | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-path](#healthcheck-path) | string | "/" for HTTP(S) protocols | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold](#healthcheck-healthy-threshold) | integer | 3 | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold](#healthcheck-unhealthy-threshold) | integer | 3 | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout](#healthcheck-timeout) | integer | 10 | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval](#healthcheck-interval) | integer | 10 | | -| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-success-codes](#healthcheck-success-codes) | string | 200-399 | | -| [service.beta.kubernetes.io/aws-load-balancer-eip-allocations](#eip-allocations) | stringList | | internet-facing lb only. Length must match the number of subnets| -| [service.beta.kubernetes.io/aws-load-balancer-private-ipv4-addresses](#private-ipv4-addresses) | stringList | | internal lb only. Length must match the number of subnets | -| [service.beta.kubernetes.io/aws-load-balancer-ipv6-addresses](#ipv6-addresses) | stringList | | dualstack lb only. Length must match the number of subnets | -| [service.beta.kubernetes.io/aws-load-balancer-target-group-attributes](#target-group-attributes) | stringMap | | | -| [service.beta.kubernetes.io/aws-load-balancer-subnets](#subnets) | stringList | | | -| [service.beta.kubernetes.io/aws-load-balancer-alpn-policy](#alpn-policy) | string | | | -| [service.beta.kubernetes.io/aws-load-balancer-target-node-labels](#target-node-labels) | stringMap | | | -| [service.beta.kubernetes.io/aws-load-balancer-attributes](#load-balancer-attributes) | stringMap | | | -| [service.beta.kubernetes.io/aws-load-balancer-security-groups](#security-groups) | stringList | | | -| [service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules](#manage-backend-sg-rules) | boolean | true | If `service.beta.kubernetes.io/aws-load-balancer-security-groups` is specified, this must also be explicitly specified otherwise it defaults to `false`. 
| +| Name | Type | Default | Notes | +|--------------------------------------------------------------------------------------------------|-------------------------|---------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [service.beta.kubernetes.io/load-balancer-source-ranges](#lb-source-ranges) | stringList | | | +| [service.beta.kubernetes.io/aws-load-balancer-security-group-prefix-lists](#lb-security-group-prefix-lists) | stringList | | | +| [service.beta.kubernetes.io/aws-load-balancer-type](#lb-type) | string | | | +| [service.beta.kubernetes.io/aws-load-balancer-nlb-target-type](#nlb-target-type) | string | | default `instance` in case of LoadBalancerClass | +| [service.beta.kubernetes.io/aws-load-balancer-name](#load-balancer-name) | string | | | +| [service.beta.kubernetes.io/aws-load-balancer-internal](#lb-internal) | boolean | false | deprecated, in favor of [aws-load-balancer-scheme](#lb-scheme) | +| [service.beta.kubernetes.io/aws-load-balancer-scheme](#lb-scheme) | string | internal | | +| [service.beta.kubernetes.io/aws-load-balancer-proxy-protocol](#proxy-protocol-v2) | string | | Set to `"*"` to enable | +| [service.beta.kubernetes.io/aws-load-balancer-ip-address-type](#ip-address-type) | string | ipv4 | ipv4 \| dualstack | +| [service.beta.kubernetes.io/aws-load-balancer-access-log-enabled](#deprecated-attributes) | boolean | false | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes) | +| [service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name](#deprecated-attributes) | string | | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes) | +| [service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix](#deprecated-attributes)| string | | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes) | +| [service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled](#deprecated-attributes)| boolean | false | deprecated, in favor of [aws-load-balancer-attributes](#load-balancer-attributes) | +| [service.beta.kubernetes.io/aws-load-balancer-ssl-cert](#ssl-cert) | stringList | | | +| [service.beta.kubernetes.io/aws-load-balancer-ssl-ports](#ssl-ports) | stringList | | | +| [service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy](#ssl-negotiation-policy) | string | ELBSecurityPolicy-2016-08 | | +| [service.beta.kubernetes.io/aws-load-balancer-backend-protocol](#backend-protocol) | string | | | +| [service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags](#additional-resource-tags) | stringMap | | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-protocol](#healthcheck-protocol) | string | TCP | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-port ](#healthcheck-port) | integer \| traffic-port | traffic-port | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-path](#healthcheck-path) | string | "/" for HTTP(S) protocols | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold](#healthcheck-healthy-threshold) | integer | 3 | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold](#healthcheck-unhealthy-threshold) | integer | 3 | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout](#healthcheck-timeout) | integer | 10 | | 
+| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval](#healthcheck-interval) | integer | 10 | | +| [service.beta.kubernetes.io/aws-load-balancer-healthcheck-success-codes](#healthcheck-success-codes) | string | 200-399 | | +| [service.beta.kubernetes.io/aws-load-balancer-eip-allocations](#eip-allocations) | stringList | | internet-facing lb only. Length must match the number of subnets | +| [service.beta.kubernetes.io/aws-load-balancer-private-ipv4-addresses](#private-ipv4-addresses) | stringList | | internal lb only. Length must match the number of subnets | +| [service.beta.kubernetes.io/aws-load-balancer-ipv6-addresses](#ipv6-addresses) | stringList | | dualstack lb only. Length must match the number of subnets | +| [service.beta.kubernetes.io/aws-load-balancer-target-group-attributes](#target-group-attributes) | stringMap | | | +| [service.beta.kubernetes.io/aws-load-balancer-subnets](#subnets) | stringList | | | +| [service.beta.kubernetes.io/aws-load-balancer-alpn-policy](#alpn-policy) | string | | | +| [service.beta.kubernetes.io/aws-load-balancer-target-node-labels](#target-node-labels) | stringMap | | | +| [service.beta.kubernetes.io/aws-load-balancer-attributes](#load-balancer-attributes) | stringMap | | | +| [service.beta.kubernetes.io/aws-load-balancer-security-groups](#security-groups) | stringList | | | +| [service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules](#manage-backend-sg-rules) | boolean | true | If `service.beta.kubernetes.io/aws-load-balancer-security-groups` is specified, this must also be explicitly specified otherwise it defaults to `false`. | | [service.beta.kubernetes.io/aws-load-balancer-inbound-sg-rules-on-private-link-traffic](#update-security-settings) | string | | | [service.beta.kubernetes.io/aws-load-balancer-listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap | | -| [service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group](#multi-cluster-target-group) | boolean | false | If specified, the controller will only operate on targets that exist within the cluster, ignoring targets from other sources. | -| [service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat](#enable-prefix-for-ipv6-source-nat) | string | off | Optional annotation. dualstack lb only. Allowed values - on and off | +| [service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group](#multi-cluster-target-group) | boolean | false | If specified, the controller will only operate on targets that exist within the cluster, ignoring targets from other sources. | +| [service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat](#enable-prefix-for-ipv6-source-nat) | string | off | Optional annotation. dualstack lb only. Allowed values - on and off | | [service.beta.kubernetes.io/aws-load-balancer-source-nat-ipv6-prefixes](#source-nat-ipv6-prefixes) | stringList | | Optional annotation. dualstack lb only. This annotation is only applicable when user has to set the service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat to "on". Length must match the number of subnets | ## Traffic Routing @@ -206,35 +206,35 @@ You can configure dualstack NLB to support UDP-based services over IPv6 via the - service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat specifies whether Prefix for IPv6 source NAT is enabled or not. UDP-based support can be enabled for dualstack NLBs only if Prefix for IPv6 source NAT is enabled. 
- !!!note "" - - Applicable to Network Load Balancers using dualstack IP address type. - - This configuration is optional, and you can use it to enable UDP support over IPv6. - - Allowed values are either “on” or “off” - - Once the source prefix for source NATing is enabled, it cannot be disabled if load balancer has a UDP listener attached. - - Steps to disable the aws-load-balancer-enable-prefix-for-ipv6-source-nat after it is enabled and UDP listeners already attached. - - You will have to first remove the UDP listeners and apply the manifest. - - Update the manifest to set source NATing to "off" and then apply the manifest again. - - !!!example - - Enable prefix for IPv6 Source NAT - ``` - service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat: "on" - ``` + !!!note + - Applicable to Network Load Balancers using dualstack IP address type. + - This configuration is optional, and you can use it to enable UDP support over IPv6. + - Allowed values are either “on” or “off” + - Once the source prefix for source NATing is enabled, it cannot be disabled if load balancer has a UDP listener attached. + - Steps to disable the aws-load-balancer-enable-prefix-for-ipv6-source-nat after it is enabled and UDP listeners already attached. + - You will have to first remove the UDP listeners and apply the manifest. + - Update the manifest to set source NATing to "off" and then apply the manifest again. + + !!!example + - Enable prefix for IPv6 Source NAT + ``` + service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat: "on" + ``` - service.beta.kubernetes.io/aws-load-balancer-source-nat-ipv6-prefixes specifies a list of IPv6 prefixes that should be used for IPv6 source NATing. - !!!note "" - - Applicable to Network Load Balancers using dualstack IP address type. - - This annotation can be specified only if service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat annotation is set to “on”. - - This configuration is optional and it can be used to specify custom IPv6 prefixes for IPv6 source NATing to support UDP based services routing in Network Load Balancers using dualstack IP address type. - - If service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat annotation is set to “on”, and you don’t specify this annotation, then IPv6 prefix/CIDR for source NATing will be auto-assigned to each subnet. - - If you are specifying this annotation, you must specify the same number of items in the list as the load balancer subnets annotation and following the same order. Each item in the list can have value of either “auto_assigned” or a valid IPv6 prefix/CIDR with prefix length of 80 and it should be in range of the corresponding subnet CIDR. - - Once the source NAT IPv6 prefixes are set, the IPv6 prefixes cannot be updated if the load balancer has a UDP listener attached. - - !!!example - ``` - service.beta.kubernetes.io/aws-load-balancer-source-nat-ipv6-prefixes: 1025:0223:0009:6487:0001::/80, auto_assigned, 1025:0223:0010:6487:0001::/80 - ``` + !!!note + - Applicable to Network Load Balancers using dualstack IP address type. + - This annotation can be specified only if service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat annotation is set to “on”. + - This configuration is optional and it can be used to specify custom IPv6 prefixes for IPv6 source NATing to support UDP based services routing in Network Load Balancers using dualstack IP address type. 
+ - If service.beta.kubernetes.io/aws-load-balancer-enable-prefix-for-ipv6-source-nat annotation is set to “on”, and you don’t specify this annotation, then IPv6 prefix/CIDR for source NATing will be auto-assigned to each subnet. + - If you are specifying this annotation, you must specify the same number of items in the list as the load balancer subnets annotation and following the same order. Each item in the list can have value of either “auto_assigned” or a valid IPv6 prefix/CIDR with prefix length of 80 and it should be in range of the corresponding subnet CIDR. + - Once the source NAT IPv6 prefixes are set, the IPv6 prefixes cannot be updated if the load balancer has a UDP listener attached. + + !!!example + ``` + service.beta.kubernetes.io/aws-load-balancer-source-nat-ipv6-prefixes: 1025:0223:0009:6487:0001::/80, auto_assigned, 1025:0223:0010:6487:0001::/80 + ``` ## Resource attributes NLB resource attributes can be controlled via the following annotations: diff --git a/helm/aws-load-balancer-controller/Chart.yaml b/helm/aws-load-balancer-controller/Chart.yaml index 1100e7efcb..4646868e14 100644 --- a/helm/aws-load-balancer-controller/Chart.yaml +++ b/helm/aws-load-balancer-controller/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: aws-load-balancer-controller description: AWS Load Balancer Controller Helm chart for Kubernetes -version: 1.10.0 -appVersion: v2.10.0 +version: 1.10.1 +appVersion: v2.10.1 home: https://github.com/aws/eks-charts icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png sources: diff --git a/helm/aws-load-balancer-controller/test.yaml b/helm/aws-load-balancer-controller/test.yaml index 42c7421fd5..9aa8dfc7fb 100644 --- a/helm/aws-load-balancer-controller/test.yaml +++ b/helm/aws-load-balancer-controller/test.yaml @@ -6,7 +6,7 @@ replicaCount: 2 image: repository: public.ecr.aws/eks/aws-load-balancer-controller - tag: v2.10.0 + tag: v2.10.1 pullPolicy: IfNotPresent imagePullSecrets: [] diff --git a/helm/aws-load-balancer-controller/values.yaml b/helm/aws-load-balancer-controller/values.yaml index a33542b8b5..da6a525a25 100644 --- a/helm/aws-load-balancer-controller/values.yaml +++ b/helm/aws-load-balancer-controller/values.yaml @@ -8,7 +8,7 @@ revisionHistoryLimit: 10 image: repository: public.ecr.aws/eks/aws-load-balancer-controller - tag: v2.10.0 + tag: v2.10.1 pullPolicy: IfNotPresent runtimeClassName: "" diff --git a/main.go b/main.go index b6484f24fd..de2a3edc9d 100644 --- a/main.go +++ b/main.go @@ -39,6 +39,8 @@ import ( "sigs.k8s.io/aws-load-balancer-controller/pkg/config" "sigs.k8s.io/aws-load-balancer-controller/pkg/inject" "sigs.k8s.io/aws-load-balancer-controller/pkg/k8s" + awsmetrics "sigs.k8s.io/aws-load-balancer-controller/pkg/metrics/aws" + lbcmetrics "sigs.k8s.io/aws-load-balancer-controller/pkg/metrics/lbc" "sigs.k8s.io/aws-load-balancer-controller/pkg/networking" "sigs.k8s.io/aws-load-balancer-controller/pkg/runtime" "sigs.k8s.io/aws-load-balancer-controller/pkg/targetgroupbinding" @@ -81,7 +83,14 @@ func main() { ctrl.SetLogger(appLogger) klog.SetLoggerWithOptions(appLogger, klog.ContextualLogger(true)) - cloud, err := aws.NewCloud(controllerCFG.AWSConfig, metrics.Registry, ctrl.Log, nil) + var awsMetricsCollector *awsmetrics.Collector + lbcMetricsCollector := lbcmetrics.NewCollector(metrics.Registry) + + if metrics.Registry != nil { + awsMetricsCollector = awsmetrics.NewCollector(metrics.Registry) + } + + cloud, err := aws.NewCloud(controllerCFG.AWSConfig, awsMetricsCollector, ctrl.Log, nil) if err != nil { 
setupLog.Error(err, "unable to initialize AWS cloud") os.Exit(1) @@ -113,7 +122,7 @@ func main() { subnetResolver := networking.NewDefaultSubnetsResolver(azInfoProvider, cloud.EC2(), cloud.VpcID(), controllerCFG.ClusterName, ctrl.Log.WithName("subnets-resolver")) multiClusterManager := targetgroupbinding.NewMultiClusterManager(mgr.GetClient(), mgr.GetAPIReader(), ctrl.Log) tgbResManager := targetgroupbinding.NewDefaultResourceManager(mgr.GetClient(), cloud.ELBV2(), cloud.EC2(), - podInfoRepo, sgManager, sgReconciler, vpcInfoProvider, multiClusterManager, + podInfoRepo, sgManager, sgReconciler, vpcInfoProvider, multiClusterManager, lbcMetricsCollector, cloud.VpcID(), controllerCFG.ClusterName, controllerCFG.FeatureGates.Enabled(config.EndpointsFailOpen), controllerCFG.EnableEndpointSlices, controllerCFG.DisableRestrictedSGRules, controllerCFG.ServiceTargetENISGTags, mgr.GetEventRecorderFor("targetGroupBinding"), ctrl.Log) backendSGProvider := networking.NewBackendSGProvider(controllerCFG.ClusterName, controllerCFG.BackendSecurityGroup, diff --git a/pkg/aws/cloud.go b/pkg/aws/cloud.go index cc679b9436..41070e70db 100644 --- a/pkg/aws/cloud.go +++ b/pkg/aws/cloud.go @@ -11,7 +11,6 @@ import ( smithymiddleware "github.com/aws/smithy-go/middleware" "net" "os" - "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/metrics" "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/throttle" "sigs.k8s.io/aws-load-balancer-controller/pkg/version" "strings" @@ -21,11 +20,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/go-logr/logr" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" amerrors "k8s.io/apimachinery/pkg/util/errors" epresolver "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/endpoints" "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/provider" "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/services" + aws_metrics "sigs.k8s.io/aws-load-balancer-controller/pkg/metrics/aws" ) const userAgent = "elbv2.k8s.aws" @@ -60,7 +59,7 @@ type Cloud interface { } // NewCloud constructs new Cloud implementation. 
-func NewCloud(cfg CloudConfig, metricsRegisterer prometheus.Registerer, logger logr.Logger, awsClientsProvider provider.AWSClientsProvider) (Cloud, error) { +func NewCloud(cfg CloudConfig, metricsCollector *aws_metrics.Collector, logger logr.Logger, awsClientsProvider provider.AWSClientsProvider) (Cloud, error) { hasIPv4 := true addrs, err := net.InterfaceAddrs() if err == nil { @@ -122,12 +121,8 @@ func NewCloud(cfg CloudConfig, metricsRegisterer prometheus.Registerer, logger l }) } - if metricsRegisterer != nil { - metricsCollector, err := metrics.NewCollector(metricsRegisterer) - if err != nil { - return nil, errors.Wrapf(err, "failed to initialize sdk metrics collector") - } - awsConfig.APIOptions = metrics.WithSDKMetricCollector(metricsCollector, awsConfig.APIOptions) + if metricsCollector != nil { + awsConfig.APIOptions = aws_metrics.WithSDKMetricCollector(metricsCollector, awsConfig.APIOptions) } if awsClientsProvider == nil { diff --git a/pkg/backend/endpoint_resolver.go b/pkg/backend/endpoint_resolver.go index 507ac16ca2..55d7a50f6b 100644 --- a/pkg/backend/endpoint_resolver.go +++ b/pkg/backend/endpoint_resolver.go @@ -160,7 +160,11 @@ func (r *defaultEndpointResolver) resolvePodEndpointsWithEndpointsData(ctx conte } epAddr := ep.Addresses[0] - podKey := types.NamespacedName{Namespace: svcKey.Namespace, Name: ep.TargetRef.Name} + podNamespace := svcKey.Namespace + if ep.TargetRef.Namespace != "" { + podNamespace = ep.TargetRef.Namespace + } + podKey := types.NamespacedName{Namespace: podNamespace, Name: ep.TargetRef.Name} pod, exists, err := r.podInfoRepo.Get(ctx, podKey) if err != nil { return nil, false, err diff --git a/pkg/deploy/elbv2/listener_manager.go b/pkg/deploy/elbv2/listener_manager.go index 1aa4a6b70f..a9db440779 100644 --- a/pkg/deploy/elbv2/listener_manager.go +++ b/pkg/deploy/elbv2/listener_manager.go @@ -23,8 +23,8 @@ import ( ) var PROTOCOLS_SUPPORTING_LISTENER_ATTRIBUTES = map[elbv2model.Protocol]bool{ - elbv2model.ProtocolHTTP: false, - elbv2model.ProtocolHTTPS: false, + elbv2model.ProtocolHTTP: true, + elbv2model.ProtocolHTTPS: true, elbv2model.ProtocolTCP: true, elbv2model.ProtocolUDP: false, elbv2model.ProtocolTLS: false, diff --git a/pkg/ingress/model_build_actions.go b/pkg/ingress/model_build_actions.go index e2e883dbc3..1562290744 100644 --- a/pkg/ingress/model_build_actions.go +++ b/pkg/ingress/model_build_actions.go @@ -187,7 +187,7 @@ func (t *defaultModelBuildTask) buildAuthenticateOIDCAction(ctx context.Context, t.secretKeys = append(t.secretKeys, secretKey) clientID := strings.TrimRightFunc(string(rawClientID), unicode.IsSpace) - clientSecret := string(rawClientSecret) + clientSecret := strings.TrimRightFunc(string(rawClientSecret), unicode.IsControl) return elbv2model.Action{ Type: elbv2model.ActionTypeAuthenticateOIDC, AuthenticateOIDCConfig: &elbv2model.AuthenticateOIDCActionConfig{ diff --git a/pkg/ingress/model_build_actions_test.go b/pkg/ingress/model_build_actions_test.go index 91a3e51733..db24c7fe55 100644 --- a/pkg/ingress/model_build_actions_test.go +++ b/pkg/ingress/model_build_actions_test.go @@ -85,6 +85,61 @@ func Test_defaultModelBuildTask_buildAuthenticateOIDCAction(t *testing.T) { }, }, }, + { + name: "clientSecret has control characters at end", + env: env{ + secrets: []*corev1.Secret{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "my-ns", + Name: "my-k8s-secret", + }, + Data: map[string][]byte{ + "clientID": []byte("my-client-id"), + "clientSecret": []byte("my-client-secret\n"), + }, + }, + }, + }, + args: args{ + authCfg: 
AuthConfig{ + Type: AuthTypeCognito, + IDPConfigOIDC: &AuthIDPConfigOIDC{ + Issuer: "https://example.com", + AuthorizationEndpoint: "https://authorization.example.com", + TokenEndpoint: "https://token.example.com", + UserInfoEndpoint: "https://userinfo.example.co", + SecretName: "my-k8s-secret", + AuthenticationRequestExtraParams: map[string]string{ + "key1": "value1", + }, + }, + OnUnauthenticatedRequest: "authenticate", + Scope: "email", + SessionCookieName: "my-session-cookie", + SessionTimeout: 65536, + }, + namespace: "my-ns", + }, + want: elbv2model.Action{ + Type: elbv2model.ActionTypeAuthenticateOIDC, + AuthenticateOIDCConfig: &elbv2model.AuthenticateOIDCActionConfig{ + Issuer: "https://example.com", + AuthorizationEndpoint: "https://authorization.example.com", + TokenEndpoint: "https://token.example.com", + UserInfoEndpoint: "https://userinfo.example.co", + ClientID: "my-client-id", + ClientSecret: "my-client-secret", + AuthenticationRequestExtraParams: map[string]string{ + "key1": "value1", + }, + OnUnauthenticatedRequest: authBehaviorAuthenticate, + Scope: awssdk.String("email"), + SessionCookieName: awssdk.String("my-session-cookie"), + SessionTimeout: awssdk.Int64(65536), + }, + }, + }, { name: "clientID & clientSecret configured - legacy clientId", env: env{ diff --git a/pkg/ingress/model_build_listener.go b/pkg/ingress/model_build_listener.go index 31757773e5..80f848ba3d 100644 --- a/pkg/ingress/model_build_listener.go +++ b/pkg/ingress/model_build_listener.go @@ -426,6 +426,14 @@ func (t *defaultModelBuildTask) fetchTrustStoreArnFromName(ctx context.Context, func (t *defaultModelBuildTask) buildIngressGroupListenerAttributes(ctx context.Context, ingList []ClassifiedIngress, listenerProtocol elbv2model.Protocol, port int32) ([]elbv2model.ListenerAttribute, error) { rawIngGrouplistenerAttributes := make(map[string]string) + ingClassAttributes := make(map[string]string) + if len(ingList) > 0 { + var err error + ingClassAttributes, err = t.buildIngressClassListenerAttributes(ingList[0].IngClassConfig, listenerProtocol, port) + if err != nil { + return nil, err + } + } for _, ing := range ingList { ingAttributes, err := t.buildIngressListenerAttributes(ctx, ing.Ing.Annotations, port, listenerProtocol) if err != nil { @@ -435,18 +443,23 @@ func (t *defaultModelBuildTask) buildIngressGroupListenerAttributes(ctx context. 
attributeKey := attribute.Key attributeValue := attribute.Value if existingAttributeValue, exists := rawIngGrouplistenerAttributes[attributeKey]; exists && existingAttributeValue != attributeValue { - return nil, errors.Errorf("conflicting attributes %v: %v | %v", attributeKey, existingAttributeValue, attributeValue) + if ingClassValue, exists := ingClassAttributes[attributeKey]; exists { + // Conflict is resolved by ingClassAttributes, show a warning + t.logger.Info("listener attribute conflict resolved by ingress class", + "attributeKey", attributeKey, + "existingValue", existingAttributeValue, + "newValue", attributeValue, + "ingClassValue", ingClassValue) + } else { + // Conflict is not resolved by ingClassAttributes, return an error + return nil, errors.Errorf("conflicting listener attributes %v: %v | %v for ingress %s/%s", + attributeKey, existingAttributeValue, attributeValue, ing.Ing.Namespace, ing.Ing.Name) + } } rawIngGrouplistenerAttributes[attributeKey] = attributeValue } } - if len(ingList) > 0 { - ingClassAttributes, err := t.buildIngressClassListenerAttributes(ingList[0].IngClassConfig, listenerProtocol, port) - if err != nil { - return nil, err - } - rawIngGrouplistenerAttributes = algorithm.MergeStringMap(ingClassAttributes, rawIngGrouplistenerAttributes) - } + rawIngGrouplistenerAttributes = algorithm.MergeStringMap(ingClassAttributes, rawIngGrouplistenerAttributes) attributes := make([]elbv2model.ListenerAttribute, 0, len(rawIngGrouplistenerAttributes)) for attrKey, attrValue := range rawIngGrouplistenerAttributes { attributes = append(attributes, elbv2model.ListenerAttribute{ diff --git a/pkg/ingress/model_build_listener_test.go b/pkg/ingress/model_build_listener_test.go index ca856b6d9a..9514b8f6e1 100644 --- a/pkg/ingress/model_build_listener_test.go +++ b/pkg/ingress/model_build_listener_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" networking "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + elbv2api "sigs.k8s.io/aws-load-balancer-controller/apis/elbv2/v1beta1" "sigs.k8s.io/aws-load-balancer-controller/pkg/annotations" "sigs.k8s.io/aws-load-balancer-controller/pkg/model/elbv2" elbv2model "sigs.k8s.io/aws-load-balancer-controller/pkg/model/elbv2" @@ -272,6 +273,89 @@ func Test_buildListenerAttributes(t *testing.T) { }, }, }, + { + name: "Ignore conflicting value when the key is specified by ingress class param", + fields: fields{ + ingGroup: Group{ + ID: GroupID{Name: "explicit-group"}, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-6", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/listen-ports": `[{"HTTP": 80}]`, + "alb.ingress.kubernetes.io/listener-attributes.HTTP-80": "attrKey1=attrValue1", + }, + }, + }, + IngClassConfig: ClassConfiguration{ + IngClassParams: &elbv2api.IngressClassParams{ + ObjectMeta: metav1.ObjectMeta{ + Name: "awesome-class", + }, + Spec: elbv2api.IngressClassParamsSpec{ + Listeners: []elbv2api.Listener{ + { + Protocol: "HTTP", + Port: 80, + ListenerAttributes: []elbv2api.Attribute{ + { + Key: "attrKey1", + Value: "attrValue1", + }, + }, + }, + }, + }, + }, + }, + }, + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-7", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/listen-ports": `[{"HTTP": 80}]`, + "alb.ingress.kubernetes.io/listener-attributes.HTTP-80": "attrKey1=attrValue2", + }, + }, + }, + IngClassConfig: 
ClassConfiguration{ + IngClassParams: &elbv2api.IngressClassParams{ + ObjectMeta: metav1.ObjectMeta{ + Name: "awesome-class", + }, + Spec: elbv2api.IngressClassParamsSpec{ + Listeners: []elbv2api.Listener{ + { + Protocol: "HTTP", + Port: 80, + ListenerAttributes: []elbv2api.Attribute{ + { + Key: "attrKey1", + Value: "attrValue1", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + wantValue: []elbv2model.ListenerAttribute{ + { + Key: "attrKey1", + Value: "attrValue1", + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/ingress/model_build_load_balancer_attributes.go b/pkg/ingress/model_build_load_balancer_attributes.go index e63e286bbe..af67c49239 100644 --- a/pkg/ingress/model_build_load_balancer_attributes.go +++ b/pkg/ingress/model_build_load_balancer_attributes.go @@ -9,6 +9,14 @@ import ( // buildIngressGroupLoadBalancerAttributes builds the LB attributes for a group of Ingresses. func (t *defaultModelBuildTask) buildIngressGroupLoadBalancerAttributes(ingList []ClassifiedIngress) (map[string]string, error) { ingGroupAttributes := make(map[string]string) + ingClassAttributes := make(map[string]string) + if len(ingList) > 0 { + var err error + ingClassAttributes, err = t.buildIngressClassLoadBalancerAttributes(ingList[0].IngClassConfig) + if err != nil { + return nil, err + } + } for _, ing := range ingList { ingAttributes, err := t.buildIngressLoadBalancerAttributes(ing) if err != nil { @@ -18,18 +26,22 @@ func (t *defaultModelBuildTask) buildIngressGroupLoadBalancerAttributes(ingList for attrKey, attrValue := range ingAttributes { existingAttrValue, exists := ingGroupAttributes[attrKey] if exists && existingAttrValue != attrValue { - return nil, errors.Errorf("conflicting attributes %v: %v | %v", attrKey, existingAttrValue, attrValue) + if ingClassValue, exists := ingClassAttributes[attrKey]; exists { + // Conflict is resolved by ingClassAttributes, show a warning + t.logger.Info("load balancer attribute conflict resolved by ingress class", + "attributeKey", attrKey, + "existingValue", existingAttrValue, + "newValue", attrValue, + "ingClassValue", ingClassValue) + } else { + // Conflict is not resolved by ingClassAttributes, return an error + return nil, errors.Errorf("conflicting load balancer attributes %v: %v | %v", attrKey, existingAttrValue, attrValue) + } } ingGroupAttributes[attrKey] = attrValue } } - if len(ingList) > 0 { - ingClassAttributes, err := t.buildIngressClassLoadBalancerAttributes(ingList[0].IngClassConfig) - if err != nil { - return nil, err - } - return algorithm.MergeStringMap(ingClassAttributes, ingGroupAttributes), nil - } + ingGroupAttributes = algorithm.MergeStringMap(ingClassAttributes, ingGroupAttributes) return ingGroupAttributes, nil } diff --git a/pkg/ingress/model_build_load_balancer_attributes_test.go b/pkg/ingress/model_build_load_balancer_attributes_test.go index bb42e50e3f..4c623bd8ee 100644 --- a/pkg/ingress/model_build_load_balancer_attributes_test.go +++ b/pkg/ingress/model_build_load_balancer_attributes_test.go @@ -2,13 +2,14 @@ package ingress import ( "fmt" + "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" networking "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" elbv2api "sigs.k8s.io/aws-load-balancer-controller/apis/elbv2/v1beta1" "sigs.k8s.io/aws-load-balancer-controller/pkg/annotations" - "testing" ) func Test_defaultModelBuildTask_buildIngressGroupLoadBalancerAttributes(t *testing.T) { @@ -82,7 +83,7 @@ func 
Test_defaultModelBuildTask_buildIngressGroupLoadBalancerAttributes(t *testi }, }, }, - wantErr: errors.New("conflicting attributes deletion_protection.enabled: true | false"), + wantErr: errors.New("conflicting load balancer attributes deletion_protection.enabled: true | false"), }, { name: "non-empty annotation attributes from single Ingress, non-empty IngressClass attributes - has overlap attributes", diff --git a/pkg/aws/metrics/collector.go b/pkg/metrics/aws/collector.go similarity index 92% rename from pkg/aws/metrics/collector.go rename to pkg/metrics/aws/collector.go index 607d5f3054..f6fe66e980 100644 --- a/pkg/aws/metrics/collector.go +++ b/pkg/metrics/aws/collector.go @@ -1,4 +1,4 @@ -package metrics +package aws import ( "context" @@ -18,24 +18,21 @@ const ( sdkMiddlewareCollectAPIRequestMetric = "collectAPIRequestMetric" ) -type collector struct { +type Collector struct { instruments *instruments } -func NewCollector(registerer prometheus.Registerer) (*collector, error) { - instruments, err := newInstruments(registerer) - if err != nil { - return nil, err - } - return &collector{ +func NewCollector(registerer prometheus.Registerer) *Collector { + instruments := newInstruments(registerer) + return &Collector{ instruments: instruments, - }, nil + } } /* WithSDKMetricCollector is a function that collects prometheus metrics for the AWS SDK Go v2 API calls ad requests */ -func WithSDKMetricCollector(c *collector, apiOptions []func(*smithymiddleware.Stack) error) []func(*smithymiddleware.Stack) error { +func WithSDKMetricCollector(c *Collector, apiOptions []func(*smithymiddleware.Stack) error) []func(*smithymiddleware.Stack) error { apiOptions = append(apiOptions, func(stack *smithymiddleware.Stack) error { return WithSDKCallMetricCollector(c)(stack) }, func(stack *smithymiddleware.Stack) error { @@ -48,7 +45,7 @@ func WithSDKMetricCollector(c *collector, apiOptions []func(*smithymiddleware.St WithSDKCallMetricCollector is a middleware for the AWS SDK Go v2 that collects and reports metrics on API calls. The call metrics are collected after the call is completed */ -func WithSDKCallMetricCollector(c *collector) func(stack *smithymiddleware.Stack) error { +func WithSDKCallMetricCollector(c *Collector) func(stack *smithymiddleware.Stack) error { return func(stack *smithymiddleware.Stack) error { return stack.Initialize.Add(smithymiddleware.InitializeMiddlewareFunc(sdkMiddlewareCollectAPICallMetric, func( ctx context.Context, input smithymiddleware.InitializeInput, next smithymiddleware.InitializeHandler, @@ -91,7 +88,7 @@ func WithSDKCallMetricCollector(c *collector) func(stack *smithymiddleware.Stack WithSDKRequestMetricCollector is a middleware for the AWS SDK Go v2 that collects and reports metrics on API requests. 
The request metrics are collected after each retry attempts */ -func WithSDKRequestMetricCollector(c *collector) func(stack *smithymiddleware.Stack) error { +func WithSDKRequestMetricCollector(c *Collector) func(stack *smithymiddleware.Stack) error { return func(stack *smithymiddleware.Stack) error { return stack.Finalize.Add(smithymiddleware.FinalizeMiddlewareFunc(sdkMiddlewareCollectAPIRequestMetric, func( ctx context.Context, input smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler, diff --git a/pkg/aws/metrics/collector_test.go b/pkg/metrics/aws/collector_test.go similarity index 98% rename from pkg/aws/metrics/collector_test.go rename to pkg/metrics/aws/collector_test.go index db2579381f..3e52a2dda9 100644 --- a/pkg/aws/metrics/collector_test.go +++ b/pkg/metrics/aws/collector_test.go @@ -1,4 +1,4 @@ -package metrics +package aws import ( "errors" diff --git a/pkg/aws/metrics/instruments.go b/pkg/metrics/aws/instruments.go similarity index 77% rename from pkg/aws/metrics/instruments.go rename to pkg/metrics/aws/instruments.go index e3ca812044..150bf11019 100644 --- a/pkg/aws/metrics/instruments.go +++ b/pkg/metrics/aws/instruments.go @@ -1,11 +1,11 @@ -package metrics +package aws import ( "github.com/prometheus/client_golang/prometheus" ) const ( - metricSubsystemAWS = "aws" + metricSubSystem = "aws" metricAPICallsTotal = "api_calls_total" metricAPICallDurationSeconds = "api_call_duration_seconds" @@ -31,55 +31,41 @@ type instruments struct { } // newInstruments allocates and register new metrics to registerer -func newInstruments(registerer prometheus.Registerer) (*instruments, error) { +func newInstruments(registerer prometheus.Registerer) *instruments { apiCallsTotal := prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: metricSubsystemAWS, + Subsystem: metricSubSystem, Name: metricAPICallsTotal, Help: "Total number of SDK API calls from the customer's code to AWS services", }, []string{labelService, labelOperation, labelStatusCode, labelErrorCode}) apiCallDurationSeconds := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: metricSubsystemAWS, + Subsystem: metricSubSystem, Name: metricAPICallDurationSeconds, Help: "Perceived latency from when your code makes an SDK call, includes retries", }, []string{labelService, labelOperation}) apiCallRetries := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: metricSubsystemAWS, + Subsystem: metricSubSystem, Name: metricAPICallRetries, Help: "Number of times the SDK retried requests to AWS services for SDK API calls", Buckets: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, }, []string{labelService, labelOperation}) apiRequestsTotal := prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: metricSubsystemAWS, + Subsystem: metricSubSystem, Name: metricAPIRequestsTotal, Help: "Total number of HTTP requests that the SDK made", }, []string{labelService, labelOperation, labelStatusCode, labelErrorCode}) apiRequestDurationSecond := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: metricSubsystemAWS, + Subsystem: metricSubSystem, Name: metricAPIRequestDurationSeconds, Help: "Latency of an individual HTTP request to the service endpoint", }, []string{labelService, labelOperation}) - if err := registerer.Register(apiCallsTotal); err != nil { - return nil, err - } - if err := registerer.Register(apiCallDurationSeconds); err != nil { - return nil, err - } - if err := registerer.Register(apiCallRetries); err != nil { - return nil, err - } - if err := 
registerer.Register(apiRequestsTotal); err != nil { - return nil, err - } - if err := registerer.Register(apiRequestDurationSecond); err != nil { - return nil, err - } + registerer.MustRegister(apiCallsTotal, apiCallDurationSeconds, apiCallRetries, apiRequestsTotal, apiRequestDurationSecond) return &instruments{ apiCallsTotal: apiCallsTotal, apiCallDurationSeconds: apiCallDurationSeconds, apiCallRetries: apiCallRetries, apiRequestsTotal: apiRequestsTotal, apiRequestDurationSecond: apiRequestDurationSecond, - }, nil + } } diff --git a/pkg/metrics/lbc/collector.go b/pkg/metrics/lbc/collector.go new file mode 100644 index 0000000000..34da128486 --- /dev/null +++ b/pkg/metrics/lbc/collector.go @@ -0,0 +1,39 @@ +package lbc + +import ( + "github.com/prometheus/client_golang/prometheus" + "time" +) + +type MetricCollector interface { + // ObservePodReadinessGateReady this metric is useful to determine how fast pods are becoming ready in the load balancer. + // Due to some architectural constraints, we can only emit this metric for pods that are using readiness gates. + ObservePodReadinessGateReady(namespace string, tgbName string, duration time.Duration) +} + +type collector struct { + instruments *instruments +} + +type noOpCollector struct{} + +func (n *noOpCollector) ObservePodReadinessGateReady(_ string, _ string, _ time.Duration) { +} + +func NewCollector(registerer prometheus.Registerer) MetricCollector { + if registerer == nil { + return &noOpCollector{} + } + + instruments := newInstruments(registerer) + return &collector{ + instruments: instruments, + } +} + +func (c *collector) ObservePodReadinessGateReady(namespace string, tgbName string, duration time.Duration) { + c.instruments.podReadinessFlipSeconds.With(prometheus.Labels{ + labelNamespace: namespace, + labelName: tgbName, + }).Observe(duration.Seconds()) +} diff --git a/pkg/metrics/lbc/instruments.go b/pkg/metrics/lbc/instruments.go new file mode 100644 index 0000000000..0a38b7f771 --- /dev/null +++ b/pkg/metrics/lbc/instruments.go @@ -0,0 +1,38 @@ +package lbc + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + metricSubsystem = "awslbc" +) + +// These metrics are exported to be used in unit test validation. 
+const ( + // MetricPodReadinessGateReady tracks the time to flip a readiness gate to true + MetricPodReadinessGateReady = "readiness_gate_ready_seconds" +) + +const ( + labelNamespace = "namespace" + labelName = "name" +) + +type instruments struct { + podReadinessFlipSeconds *prometheus.HistogramVec +} + +// newInstruments allocates and register new metrics to registerer +func newInstruments(registerer prometheus.Registerer) *instruments { + podReadinessFlipSeconds := prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: metricSubsystem, + Name: MetricPodReadinessGateReady, + Help: "Latency from pod getting added to the load balancer until the readiness gate is flipped to healthy.", + }, []string{labelNamespace, labelName}) + + registerer.MustRegister(podReadinessFlipSeconds) + return &instruments{ + podReadinessFlipSeconds: podReadinessFlipSeconds, + } +} diff --git a/pkg/metrics/lbc/mockcollector.go b/pkg/metrics/lbc/mockcollector.go new file mode 100644 index 0000000000..9c8fb6a43a --- /dev/null +++ b/pkg/metrics/lbc/mockcollector.go @@ -0,0 +1,37 @@ +package lbc + +import ( + "time" +) + +type MockCollector struct { + Invocations map[string][]interface{} +} + +type MockHistogramMetric struct { + namespace string + name string + duration time.Duration +} + +func (m *MockCollector) ObservePodReadinessGateReady(namespace string, tgbName string, d time.Duration) { + m.recordHistogram(MetricPodReadinessGateReady, namespace, tgbName, d) +} + +func (m *MockCollector) recordHistogram(metricName string, namespace string, name string, d time.Duration) { + m.Invocations[metricName] = append(m.Invocations[MetricPodReadinessGateReady], MockHistogramMetric{ + namespace: namespace, + name: name, + duration: d, + }) +} + +func NewMockCollector() MetricCollector { + + mockInvocations := make(map[string][]interface{}) + mockInvocations[MetricPodReadinessGateReady] = make([]interface{}, 0) + + return &MockCollector{ + Invocations: mockInvocations, + } +} diff --git a/pkg/networking/subnet_resolver.go b/pkg/networking/subnet_resolver.go index aba7fba313..f00e8a51d1 100644 --- a/pkg/networking/subnet_resolver.go +++ b/pkg/networking/subnet_resolver.go @@ -3,10 +3,11 @@ package networking import ( "context" "fmt" - ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "sort" "strings" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awssdk "github.com/aws/aws-sdk-go-v2/aws" ec2sdk "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/go-logr/logr" @@ -328,7 +329,6 @@ func (r *defaultSubnetsResolver) ResolveViaNameOrIDSlice(ctx context.Context, su if err := r.validateSubnetsMinimalCount(resolvedSubnets, subnetLocale, resolveOpts); err != nil { return nil, err } - sortSubnetsByID(resolvedSubnets) return resolvedSubnets, nil } diff --git a/pkg/targetgroupbinding/resource_manager.go b/pkg/targetgroupbinding/resource_manager.go index 9056c36664..af25a824f6 100644 --- a/pkg/targetgroupbinding/resource_manager.go +++ b/pkg/targetgroupbinding/resource_manager.go @@ -6,6 +6,7 @@ import ( elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/aws/smithy-go" "net/netip" + lbcmetrics "sigs.k8s.io/aws-load-balancer-controller/pkg/metrics/lbc" "time" "k8s.io/client-go/tools/record" @@ -37,7 +38,7 @@ type ResourceManager interface { // NewDefaultResourceManager constructs new defaultResourceManager. 
func NewDefaultResourceManager(k8sClient client.Client, elbv2Client services.ELBV2, ec2Client services.EC2, podInfoRepo k8s.PodInfoRepo, sgManager networking.SecurityGroupManager, sgReconciler networking.SecurityGroupReconciler, - vpcInfoProvider networking.VPCInfoProvider, multiClusterManager MultiClusterManager, + vpcInfoProvider networking.VPCInfoProvider, multiClusterManager MultiClusterManager, metricsCollector lbcmetrics.MetricCollector, vpcID string, clusterName string, failOpenEnabled bool, endpointSliceEnabled bool, disabledRestrictedSGRulesFlag bool, endpointSGTags map[string]string, eventRecorder record.EventRecorder, logger logr.Logger) *defaultResourceManager { @@ -60,6 +61,7 @@ func NewDefaultResourceManager(k8sClient client.Client, elbv2Client services.ELB vpcInfoProvider: vpcInfoProvider, podInfoRepo: podInfoRepo, multiClusterManager: multiClusterManager, + metricsCollector: metricsCollector, requeueDuration: defaultRequeueDuration, } @@ -78,6 +80,7 @@ type defaultResourceManager struct { vpcInfoProvider networking.VPCInfoProvider podInfoRepo k8s.PodInfoRepo multiClusterManager MultiClusterManager + metricsCollector lbcmetrics.MetricCollector vpcID string requeueDuration time.Duration @@ -240,7 +243,7 @@ func (m *defaultResourceManager) reconcileWithIPTargetType(ctx context.Context, return "", "", false, err } - anyPodNeedFurtherProbe, err := m.updateTargetHealthPodCondition(ctx, targetHealthCondType, matchedEndpointAndTargets, unmatchedEndpoints) + anyPodNeedFurtherProbe, err := m.updateTargetHealthPodCondition(ctx, targetHealthCondType, matchedEndpointAndTargets, unmatchedEndpoints, tgb) if err != nil { return "", "", false, err } @@ -374,13 +377,13 @@ func (m *defaultResourceManager) cleanupTargets(ctx context.Context, tgb *elbv2a // updateTargetHealthPodCondition will updates pod's targetHealth condition for matchedEndpointAndTargets and unmatchedEndpoints. // returns whether further probe is needed or not func (m *defaultResourceManager) updateTargetHealthPodCondition(ctx context.Context, targetHealthCondType corev1.PodConditionType, - matchedEndpointAndTargets []podEndpointAndTargetPair, unmatchedEndpoints []backend.PodEndpoint) (bool, error) { + matchedEndpointAndTargets []podEndpointAndTargetPair, unmatchedEndpoints []backend.PodEndpoint, tgb *elbv2api.TargetGroupBinding) (bool, error) { anyPodNeedFurtherProbe := false for _, endpointAndTarget := range matchedEndpointAndTargets { pod := endpointAndTarget.endpoint.Pod targetHealth := endpointAndTarget.target.TargetHealth - needFurtherProbe, err := m.updateTargetHealthPodConditionForPod(ctx, pod, targetHealth, targetHealthCondType) + needFurtherProbe, err := m.updateTargetHealthPodConditionForPod(ctx, pod, targetHealth, targetHealthCondType, tgb) if err != nil { return false, err } @@ -396,7 +399,7 @@ func (m *defaultResourceManager) updateTargetHealthPodCondition(ctx context.Cont Reason: elbv2types.TargetHealthReasonEnumRegistrationInProgress, Description: awssdk.String("Target registration is in progress"), } - needFurtherProbe, err := m.updateTargetHealthPodConditionForPod(ctx, pod, targetHealth, targetHealthCondType) + needFurtherProbe, err := m.updateTargetHealthPodConditionForPod(ctx, pod, targetHealth, targetHealthCondType, tgb) if err != nil { return false, err } @@ -410,7 +413,7 @@ func (m *defaultResourceManager) updateTargetHealthPodCondition(ctx context.Cont // updateTargetHealthPodConditionForPod updates pod's targetHealth condition for a single pod and its matched target. 
 // returns whether further probe is needed or not.
 func (m *defaultResourceManager) updateTargetHealthPodConditionForPod(ctx context.Context, pod k8s.PodInfo,
-	targetHealth *elbv2types.TargetHealth, targetHealthCondType corev1.PodConditionType) (bool, error) {
+	targetHealth *elbv2types.TargetHealth, targetHealthCondType corev1.PodConditionType, tgb *elbv2api.TargetGroupBinding) (bool, error) {
 	if !pod.HasAnyOfReadinessGates([]corev1.PodConditionType{targetHealthCondType}) {
 		return false, nil
 	}
@@ -468,6 +471,12 @@ func (m *defaultResourceManager) updateTargetHealthPodConditionForPod(ctx contex
 		return false, err
 	}
 
+	// Only update duration on unhealthy -> healthy flips.
+	if targetHealthCondStatus == corev1.ConditionTrue && hasExistingTargetHealthCond && !existingTargetHealthCond.LastTransitionTime.IsZero() && existingTargetHealthCond.Status != corev1.ConditionTrue {
+		delta := newTargetHealthCond.LastTransitionTime.Sub(existingTargetHealthCond.LastTransitionTime.Time)
+		m.metricsCollector.ObservePodReadinessGateReady(tgb.Namespace, tgb.Name, delta)
+	}
+
 	return needFurtherProbe, nil
 }
 
@@ -509,7 +518,7 @@ func (m *defaultResourceManager) updatePodAsHealthyForDeletedTGB(ctx context.Con
 			State:       elbv2types.TargetHealthStateEnumHealthy,
 			Description: awssdk.String("Target Group Binding is deleted"),
 		}
-		_, err := m.updateTargetHealthPodConditionForPod(ctx, pod, targetHealth, targetHealthCondType)
+		_, err := m.updateTargetHealthPodConditionForPod(ctx, pod, targetHealth, targetHealthCondType, tgb)
 		if err != nil {
 			return err
 		}
diff --git a/pkg/targetgroupbinding/resource_manager_test.go b/pkg/targetgroupbinding/resource_manager_test.go
index 1031c1e530..4839b2fb24 100644
--- a/pkg/targetgroupbinding/resource_manager_test.go
+++ b/pkg/targetgroupbinding/resource_manager_test.go
@@ -3,6 +3,8 @@ package targetgroupbinding
 import (
 	"context"
 	elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"
+	elbv2api "sigs.k8s.io/aws-load-balancer-controller/apis/elbv2/v1beta1"
+	lbcmetrics "sigs.k8s.io/aws-load-balancer-controller/pkg/metrics/lbc"
 	"testing"
 
 	awssdk "github.com/aws/aws-sdk-go-v2/aws"
@@ -26,6 +28,9 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 		pods []*corev1.Pod
 	}
 
+	tgbName := "tgb"
+	tgbNamespace := "tgbNamespace"
+
 	type args struct {
 		pod                  k8s.PodInfo
 		targetHealth         *elbv2types.TargetHealth
@@ -33,12 +38,13 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 	}
 
 	tests := []struct {
-		name    string
-		env     env
-		args    args
-		want    bool
-		wantPod *corev1.Pod
-		wantErr error
+		name       string
+		env        env
+		args       args
+		want       bool
+		wantPod    *corev1.Pod
+		wantMetric bool
+		wantErr    error
 	}{
 		{
 			name: "pod contains readinessGate and targetHealth is healthy - add pod condition",
@@ -137,10 +143,11 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 					Status: corev1.PodStatus{
 						Conditions: []corev1.PodCondition{
 							{
-								Type:    "target-health.elbv2.k8s.aws/my-tgb",
-								Message: string(elbv2types.TargetHealthReasonEnumRegistrationInProgress),
-								Reason:  "Elb.RegistrationInProgress",
-								Status:  corev1.ConditionFalse,
+								Type:               "target-health.elbv2.k8s.aws/my-tgb",
+								Message:            string(elbv2types.TargetHealthReasonEnumRegistrationInProgress),
+								Reason:             "Elb.RegistrationInProgress",
+								Status:             corev1.ConditionFalse,
+								LastTransitionTime: metav1.Now(),
 							},
 							{
 								Type: corev1.ContainersReady,
@@ -162,10 +169,11 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 					},
 					Conditions: []corev1.PodCondition{
 						{
-							Type:    "target-health.elbv2.k8s.aws/my-tgb",
-							Message: string(elbv2types.TargetHealthReasonEnumRegistrationInProgress),
-							Reason:  "Elb.RegistrationInProgress",
-							Status:  corev1.ConditionFalse,
+							Type:               "target-health.elbv2.k8s.aws/my-tgb",
+							Message:            string(elbv2types.TargetHealthReasonEnumRegistrationInProgress),
+							Reason:             "Elb.RegistrationInProgress",
+							Status:             corev1.ConditionFalse,
+							LastTransitionTime: metav1.Now(),
 						},
 						{
 							Type: corev1.ContainersReady,
@@ -178,7 +186,8 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 				},
 				targetHealthCondType: "target-health.elbv2.k8s.aws/my-tgb",
 			},
-			want: false,
+			want:       false,
+			wantMetric: true,
 			wantPod: &corev1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "default",
@@ -226,10 +235,11 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 					Status: corev1.PodStatus{
 						Conditions: []corev1.PodCondition{
 							{
-								Type:    "target-health.elbv2.k8s.aws/my-tgb",
-								Status:  corev1.ConditionFalse,
-								Reason:  string(elbv2types.TargetHealthReasonEnumRegistrationInProgress),
-								Message: "Target registration is in progress",
+								Type:               "target-health.elbv2.k8s.aws/my-tgb",
+								Status:             corev1.ConditionFalse,
+								Reason:             string(elbv2types.TargetHealthReasonEnumRegistrationInProgress),
+								Message:            "Target registration is in progress",
+								LastTransitionTime: metav1.Now(),
 							},
 							{
 								Type: corev1.ContainersReady,
@@ -382,10 +392,15 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 
 			k8sClient := testclient.NewClientBuilder().WithScheme(k8sSchema).Build()
 			m := &defaultResourceManager{
-				k8sClient: k8sClient,
-				logger:    logr.New(&log.NullLogSink{}),
+				k8sClient:        k8sClient,
+				logger:           logr.New(&log.NullLogSink{}),
+				metricsCollector: lbcmetrics.NewMockCollector(),
 			}
 
+			tgb := &elbv2api.TargetGroupBinding{}
+			tgb.Name = tgbName
+			tgb.Namespace = tgbNamespace
+
 			ctx := context.Background()
 			for _, pod := range tt.env.pods {
 				err := k8sClient.Create(ctx, pod.DeepCopy())
 			}
 			got, err := m.updateTargetHealthPodConditionForPod(context.Background(),
-				tt.args.pod, tt.args.targetHealth, tt.args.targetHealthCondType)
+				tt.args.pod, tt.args.targetHealth, tt.args.targetHealthCondType, tgb)
 			if tt.wantErr != nil {
 				assert.EqualError(t, err, tt.wantErr.Error())
 			} else {
@@ -410,6 +425,9 @@ func Test_defaultResourceManager_updateTargetHealthPodConditionForPod(t *testing
 				}
 				assert.True(t, cmp.Equal(tt.wantPod, updatedPod, opts), "diff", cmp.Diff(tt.wantPod, updatedPod, opts))
 			}
+
+			mockCollector := m.metricsCollector.(*lbcmetrics.MockCollector)
+			assert.Equal(t, tt.wantMetric, len(mockCollector.Invocations[lbcmetrics.MetricPodReadinessGateReady]) == 1)
 		})
 	}
 }
diff --git a/test/e2e/ingress/vanilla_ingress_test.go b/test/e2e/ingress/vanilla_ingress_test.go
index f5366889de..a79a3e673d 100644
--- a/test/e2e/ingress/vanilla_ingress_test.go
+++ b/test/e2e/ingress/vanilla_ingress_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/gavv/httpexpect/v2"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
"github.com/onsi/gomega" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" networking "k8s.io/api/networking/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -777,6 +778,56 @@ var _ = Describe("vanilla ingress tests", func() { } }) }) + + Context("with `alb.ingress.kubernetes.io/listener-attributes.{Protocol}-{Port}` variant settings", func() { + It("with 'alb.ingress.kubernetes.io/listener-attributes.{Protocol}-{Port}' annotation explicitly specified, one ALB shall be created and functional", func() { + appBuilder := manifest.NewFixedResponseServiceBuilder() + ingBuilder := manifest.NewIngressBuilder() + dp, svc := appBuilder.Build(sandboxNS.Name, "app", tf.Options.TestImageRegistry) + ingBackend := networking.IngressBackend{ + Service: &networking.IngressServiceBackend{ + Name: svc.Name, + Port: networking.ServiceBackendPort{ + Number: 80, + }, + }, + } + annotation := map[string]string{ + "kubernetes.io/ingress.class": "alb", + "alb.ingress.kubernetes.io/scheme": "internet-facing", + "alb.ingress.kubernetes.io/listen-ports": `[{"HTTP": 80}]`, + "alb.ingress.kubernetes.io/listener-attributes.HTTP-80": "routing.http.response.server.enabled=false", + } + if tf.Options.IPFamily == "IPv6" { + annotation["alb.ingress.kubernetes.io/ip-address-type"] = "dualstack" + annotation["alb.ingress.kubernetes.io/target-type"] = "ip" + } + ing := ingBuilder. + AddHTTPRoute("", networking.HTTPIngressPath{Path: "/path", PathType: &exact, Backend: ingBackend}). + WithAnnotations(annotation).Build(sandboxNS.Name, "ing") + resStack := fixture.NewK8SResourceStack(tf, dp, svc, ing) + err := resStack.Setup(ctx) + Expect(err).NotTo(HaveOccurred()) + + defer resStack.TearDown(ctx) + + lbARN, lbDNS := ExpectOneLBProvisionedForIngress(ctx, tf, ing) + sdkListeners, err := tf.LBManager.GetLoadBalancerListeners(ctx, lbARN) + + Eventually(func() bool { + return verifyListenerAttributes(ctx, tf, *sdkListeners[0].ListenerArn, map[string]string{ + "routing.http.response.server.enabled": "false", + }) == nil + }, utils.PollTimeoutShort, utils.PollIntervalMedium).Should(BeTrue()) + + // test traffic + ExpectLBDNSBeAvailable(ctx, tf, lbARN, lbDNS) + httpExp := httpexpect.New(tf.LoggerReporter, fmt.Sprintf("http://%v", lbDNS)) + httpExp.GET("/path").Expect(). + Status(http.StatusOK). + Body().Equal("Hello World!") + }) + }) }) // ExpectOneLBProvisionedForIngress expects one LoadBalancer provisioned for Ingress. @@ -820,3 +871,14 @@ func ExpectLBDNSBeAvailable(ctx context.Context, tf *framework.Framework, lbARN Expect(err).NotTo(HaveOccurred()) tf.Logger.Info("dns becomes available", "dns", lbDNS) } + +func verifyListenerAttributes(ctx context.Context, f *framework.Framework, lsARN string, expectedAttrs map[string]string) error { + lsAttrs, err := f.LBManager.GetListenerAttributes(ctx, lsARN) + Expect(err).NotTo(HaveOccurred()) + for _, attr := range lsAttrs { + if val, ok := expectedAttrs[awssdk.ToString(attr.Key)]; ok && val != awssdk.ToString(attr.Value) { + return errors.Errorf("Attribute %v, expected %v, actual %v", awssdk.ToString(attr.Key), val, awssdk.ToString(attr.Value)) + } + } + return nil +} diff --git a/version-stable.txt b/version-stable.txt index 10c2c0c3d6..8bbb6e406a 100644 --- a/version-stable.txt +++ b/version-stable.txt @@ -1 +1 @@ -2.10.0 +2.10.1 diff --git a/version.txt b/version.txt index 10c2c0c3d6..8bbb6e406a 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -2.10.0 +2.10.1