e2e test changes to support china regions #2723

Merged · 3 commits · Jul 16, 2022

41 changes: 29 additions & 12 deletions scripts/run-e2e-test.sh
@@ -32,14 +32,17 @@ function cleanUp(){

 # IAM role and polcies are AWS Account specific, so need to clean them up if any from previous run
 echo "detach IAM policy if it exists"
-aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn arn:aws:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy || true
+aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy || true

+# wait for 10 sec to complete detaching of IAM policy
+sleep 10
+
 echo "delete $ROLE_NAME if it exists"
 aws iam delete-role --role-name $ROLE_NAME || true

 # Need to do this as last step
 echo "delete AWSLoadBalancerControllerIAMPolicy if it exists"
-aws iam delete-policy --policy-arn arn:aws:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy || true
+aws iam delete-policy --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy || true
 }

 echo "cordon off windows nodes"
@@ -49,6 +52,23 @@ echo "fetch OIDC provider"
 OIDC_PROVIDER=$(echo $CLUSTER_INFO | jq -r '.cluster.identity.oidc.issuer' | sed -e "s/^https:\/\///")
 echo "OIDC Provider: $OIDC_PROVIDER"

+AWS_PARTITION="aws"
+IAM_POLCIY_FILE="iam_policy.json"
+
+if [[ $REGION == "cn-north-1" || $REGION == "cn-northwest-1" ]];then
+AWS_PARTITION="aws-cn"
+IAM_POLCIY_FILE="iam_policy_cn.json"
+fi
+
+if [[ $REGION == "cn-north-1" ]];then
+IMAGE="918309763551.dkr.ecr.cn-north-1.amazonaws.com.cn/amazon/aws-load-balancer-controller"
+elif [[ $REGION == "cn-northwest-1" ]];then
+IMAGE="961992271922.dkr.ecr.cn-northwest-1.amazonaws.com.cn/amazon/aws-load-balancer-controller"
+else
+IMAGE="602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller"
+fi
+
+echo "IMAGE: $IMAGE"
 echo "create IAM policy document file"
 cat <<EOF > trust.json
 {
@@ -57,7 +77,7 @@
 {
 "Effect": "Allow",
 "Principal": {
-"Federated": "arn:aws:iam::${ACCOUNT_ID}:oidc-provider/${OIDC_PROVIDER}"
+"Federated": "arn:${AWS_PARTITION}:iam::${ACCOUNT_ID}:oidc-provider/${OIDC_PROVIDER}"
 },
 "Action": "sts:AssumeRoleWithWebIdentity",
 "Condition": {
@@ -80,27 +100,24 @@ aws iam create-role --role-name $ROLE_NAME --assume-role-policy-document file://
 echo "creating AWSLoadbalancerController IAM Policy"
 aws iam create-policy \
 --policy-name AWSLoadBalancerControllerIAMPolicy \
---policy-document file://"$SCRIPT_DIR"/../docs/install/iam_policy.json || true
+--policy-document file://"$SCRIPT_DIR"/../docs/install/${IAM_POLCIY_FILE} || true

 echo "attaching AWSLoadbalancerController IAM Policy to $ROLE_NAME"
-aws iam attach-role-policy --policy-arn arn:aws:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy --role-name $ROLE_NAME || true
+aws iam attach-role-policy --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy --role-name $ROLE_NAME || true

 echo "create service account"
 kubectl create serviceaccount aws-load-balancer-controller -n kube-system || true

 echo "annotate service account with $ROLE_NAME"
-kubectl annotate serviceaccount -n kube-system aws-load-balancer-controller eks.amazonaws.com/role-arn=arn:aws:iam::"$ACCOUNT_ID":role/"$ROLE_NAME" --overwrite=true || true
+kubectl annotate serviceaccount -n kube-system aws-load-balancer-controller eks.amazonaws.com/role-arn=arn:${AWS_PARTITION}:iam::"$ACCOUNT_ID":role/"$ROLE_NAME" --overwrite=true || true

 echo "update helm repo eks"
 helm repo add eks https://aws.github.io/eks-charts

 helm repo update

-echo "Install TargetGroupBinding CRDs"
-kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
-
 echo "Install aws-load-balancer-controller"
-helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$CLUSTER_NAME --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller --set region=$REGION --set vpcId=$VPC_ID
+helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$CLUSTER_NAME --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller --set region=$REGION --set vpcId=$VPC_ID --set image.repository=$IMAGE

 echo_time() {
 date +"%D %T $*"
@@ -146,7 +163,7 @@ run_ginkgo_test
 # tail=-1 is added so that no logs are truncated
 # https://github.com/kubernetes/kubectl/issues/812
 echo "Fetch most recent aws-load-balancer-controller logs"
-kubectl logs -l app.kubernetes.io/name=aws-load-balancer-controller --container aws-load-balancer-controller --tail=-1 -n kube-system
+kubectl logs -l app.kubernetes.io/name=aws-load-balancer-controller --container aws-load-balancer-controller --tail=-1 -n kube-system || true

 echo "Uncordon windows nodes"
 toggle_windows_scheduling "uncordon"
@@ -155,6 +172,6 @@ echo "clean up resources from current run"
 cleanUp

 echo "Delete TargetGroupBinding CRDs if exists"
-kubectl delete -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" --timeout=10m || true
+kubectl delete -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" --timeout=30m || true

 echo "Successfully finished the test suite $(($SECONDS / 60)) minutes and $(($SECONDS % 60)) seconds"
2 changes: 1 addition & 1 deletion test/e2e/ingress/multi_path_backend.go
@@ -316,7 +316,7 @@ func (s *multiPathBackendStack) buildBackendResource(ns *corev1.Namespace, backe
 Containers: []corev1.Container{
 {
 Name: "app",
-Image: "970805265562.dkr.ecr.us-west-2.amazonaws.com/colorteller:latest",
+Image: utils.ColortellerImage,
 Ports: []corev1.ContainerPort{
 {
 ContainerPort: 8080,
4 changes: 3 additions & 1 deletion test/framework/manifest/fixed_response_service_builder.go
@@ -2,11 +2,13 @@ package manifest

 import (
 "fmt"
+
 "github.com/aws/aws-sdk-go/aws"
 appsv1 "k8s.io/api/apps/v1"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/intstr"
+"sigs.k8s.io/aws-load-balancer-controller/test/framework/utils"
 )

 // NewFixedResponseServiceBuilder constructs a builder that capable to build manifest for an HTTP service with fixed response.
@@ -93,7 +95,7 @@ func (b *fixedResponseServiceBuilder) buildDeployment(namespace string, name str
 Containers: []corev1.Container{
 {
 Name: "app",
-Image: "970805265562.dkr.ecr.us-west-2.amazonaws.com/colorteller:latest",
+Image: utils.ColortellerImage,
 Ports: []corev1.ContainerPort{
 {
 Name: b.targetPortName,
5 changes: 5 additions & 0 deletions test/framework/utils/constants.go
@@ -0,0 +1,5 @@
+package utils
+
+const (
+ColortellerImage = "chinmay5j/colorteller:latest"
+)
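
Note: the hard-coded us-west-2 ECR colorteller image is replaced by this shared constant pointing at a Docker Hub image, presumably so the e2e backend pods can pull it from China regions without ECR access. An optional sanity check, assuming a local Docker-compatible runtime is available (not part of this PR):

# confirm the e2e fixture image is pullable without any ECR credentials
docker pull chinmay5j/colorteller:latest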