@@ -29,28 +29,30 @@ function toggle_windows_scheduling(){
29
29
TEST_ID=$( date +%s)
30
30
echo " TEST_ID: $TEST_ID "
31
31
ROLE_NAME="aws-load-balancer-controller-$TEST_ID"
32
+ POLICY_NAME="AWSLoadBalancerControllerIAMPolicy-$TEST_ID"
32
33
33
34
function cleanUp(){
34
- # Need to recreae aws-load-balancer controller if we are updating SA
35
- echo " delete aws-load-balancer-controller if exists"
36
- helm delete aws-load-balancer-controller -n kube-system --timeout=10m || true
35
+ echo " delete serviceaccount"
36
+ kubectl delete serviceaccount aws-load-balancer-controller -n kube-system --timeout 60s || true
37
37
38
- echo " delete service account if exists"
39
- kubectl delete serviceaccount aws-load-balancer-controller -n kube-system --timeout 10m || true
40
-
41
- # IAM role and polcies are AWS Account specific, so need to clean them up if any from previous run
42
- echo " detach IAM policy if it exists"
43
- aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy || true
38
+ echo " detach IAM policy"
39
+ aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/$POLICY_NAME || true
44
40
45
41
# wait for 10 sec to complete detaching of IAM policy
46
42
sleep 10
47
43
48
- echo " delete $ROLE_NAME if it exists "
44
+ echo " delete $ROLE_NAME "
49
45
aws iam delete-role --role-name $ROLE_NAME || true
50
46
51
- # Need to do this as last step
52
- echo " delete AWSLoadBalancerControllerIAMPolicy if it exists"
53
- aws iam delete-policy --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy || true
47
+ echo " delete $POLICY_NAME "
48
+ aws iam delete-policy --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/$POLICY_NAME || true
49
+
50
+ echo " Delete CRDs if exists"
51
+ if [[ $ADC_REGIONS == *"$REGION"* ]]; then
52
+ kubectl delete -k "../helm/aws-load-balancer-controller/crds" --timeout=30s || true
53
+ else
54
+ kubectl delete -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" --timeout=30s || true
55
+ fi
54
56
}
55
57
56
58
echo " cordon off windows nodes"
@@ -104,19 +106,16 @@ cat <<EOF > trust.json
104
106
}
105
107
EOF
106
108
107
- echo " cleanup any stale resources from previous run"
108
- cleanUp
109
-
110
109
echo " create Role with above policy document"
111
110
aws iam create-role --role-name $ROLE_NAME --assume-role-policy-document file://trust.json --description " IAM Role to be used by aws-load-balancer-controller SA" || true
112
111
113
112
echo " creating AWSLoadbalancerController IAM Policy"
114
113
aws iam create-policy \
115
- --policy-name AWSLoadBalancerControllerIAMPolicy \
114
+ --policy-name $POLICY_NAME \
116
115
--policy-document file://"$SCRIPT_DIR"/../docs/install/${IAM_POLCIY_FILE} || true
117
116
118
117
echo " attaching AWSLoadBalancerController IAM Policy to $ROLE_NAME "
119
- aws iam attach-role-policy --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/AWSLoadBalancerControllerIAMPolicy --role-name $ROLE_NAME || true
118
+ aws iam attach-role-policy --policy-arn arn:${AWS_PARTITION}:iam::$ACCOUNT_ID:policy/$POLICY_NAME --role-name $ROLE_NAME || true
120
119
121
120
echo " create service account"
122
121
kubectl create serviceaccount aws-load-balancer-controller -n kube-system || true
@@ -217,15 +216,15 @@ kubectl logs -l app.kubernetes.io/name=aws-load-balancer-controller --container
217
216
echo " Uncordon windows nodes"
218
217
toggle_windows_scheduling " uncordon"
219
218
220
- echo " clean up resources from current run"
221
- cleanUp
222
-
223
- echo " Delete CRDs if exists"
219
+ echo " uninstalling aws load balancer controller"
224
220
if [[ $ADC_REGIONS == *"$REGION"* ]]; then
225
- kubectl delete -k "../helm/aws-load-balancer-controller/crds" --timeout=30m || true
221
+ kubectl delete -f $controller_yaml --timeout=60s || true
222
+ kubectl delete -f $cert_manager_yaml --timeout=60s || true
226
223
else
227
- kubectl delete -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" --timeout=30m || true
224
+ helm uninstall aws-load-balancer-controller -n kube-system --timeout=60s || true
228
225
fi
226
+ echo " clean up resources from current run"
227
+ cleanUp
229
228
230
229
if [[ " $TEST_RESULT " == fail ]]; then
231
230
echo " e2e tests failed."
0 commit comments