  PYTHON_SAMPLE_APP_NAMESPACE: python-app-namespace
  METRIC_NAMESPACE: AppSignals
  LOG_GROUP_NAME: /aws/appsignals/eks
+ TEST_RESOURCES_FOLDER: /home/runner/work/aws-application-signals-test-framework/aws-application-signals-test-framework

jobs:
  python-e2e-eks-test:
@@ -47,24 +48,17 @@ jobs:
          fetch-depth: 0

      - name: Download enablement script
-       uses: actions/checkout@v4
+       uses: ./.github/workflows/actions/execute_and_retry
        with:
-         repository: aws-observability/application-signals-demo
-         ref: main
-         path: enablement-script
-         sparse-checkout: |
-           scripts/eks/appsignals/enable-app-signals.sh
-           scripts/eks/appsignals/clean-app-signals.sh
-         sparse-checkout-cone-mode: false
-
-     - name: Resolve Add-on configuration conflict
-       working-directory: enablement-script/scripts/eks/appsignals
-       run: |
-         sed -i 's/aws eks create-addon \\/aws eks create-addon \\\n --resolve-conflicts OVERWRITE \\/' enable-app-signals.sh
+         pre-command: "mkdir enablement-script && cd enablement-script"
+         command: "wget https://raw.githubusercontent.com/aws-observability/application-signals-demo/main/scripts/eks/appsignals/enable-app-signals.sh
+           && wget https://raw.githubusercontent.com/aws-observability/application-signals-demo/main/scripts/eks/appsignals/clean-app-signals.sh"
+         cleanup: "rm -f enable-app-signals.sh && rm -f clean-app-signals.sh"
+         post-command: "chmod +x enable-app-signals.sh && chmod +x clean-app-signals.sh"

      - name: Remove log group deletion command
        if: always()
-       working-directory: enablement-script/scripts/eks/appsignals
+       working-directory: enablement-script
        run: |
          delete_log_group="aws logs delete-log-group --log-group-name '${{ env.LOG_GROUP_NAME }}' --region \$REGION"
          sed -i "s#$delete_log_group##g" clean-app-signals.sh
@@ -103,38 +97,47 @@ jobs:
        run: aws eks update-kubeconfig --name ${{ inputs.test-cluster-name }} --region ${{ inputs.aws-region }}

      - name: Install eksctl
-       working-directory: .github/
+       uses: ./.github/workflows/actions/execute_and_retry
+       with:
+         pre-command: 'mkdir ${{ github.workspace }}/eksctl'
+         command: 'curl -sLO "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz"
+           && tar -xzf eksctl_Linux_amd64.tar.gz -C ${{ github.workspace }}/eksctl && rm eksctl_Linux_amd64.tar.gz'
+         cleanup: 'rm -f eksctl_Linux_amd64.tar.gz'
+
+     - name: Add eksctl to Github Path
        run: |
-         source ./workflows/util/execute_and_retry.sh
-         mkdir ${{ github.workspace }}/eksctl
-         curl -sLO "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_Linux_amd64.tar.gz"
-         execute_and_retry 2 "tar -xzf eksctl_Linux_amd64.tar.gz -C ${{ github.workspace }}/eksctl && rm eksctl_Linux_amd64.tar.gz"
          echo "${{ github.workspace }}/eksctl" >> $GITHUB_PATH

      - name: Create role for AWS access from the sample app
        id: create_service_account
-       run: |
-         eksctl create iamserviceaccount \
+       uses: ./.github/workflows/actions/execute_and_retry
+       with:
+         command: "eksctl create iamserviceaccount \
            --name service-account-${{ env.TESTING_ID }} \
            --namespace ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }} \
            --cluster ${{ inputs.test-cluster-name }} \
            --role-name eks-s3-access-${{ env.TESTING_ID }} \
            --attach-policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess \
            --region ${{ inputs.aws-region }} \
-           --approve
+           --approve"

      - name: Set up terraform
-       uses: hashicorp/setup-terraform@v3
+       uses: ./.github/workflows/actions/execute_and_retry
+       with:
+         command: "wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg"
+         post-command: 'echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
+           && sudo apt update && sudo apt install terraform'
+
+     - name: Initiate Terraform
+       uses: ./.github/workflows/actions/execute_and_retry
        with:
-         terraform_wrapper: false
+         command: "cd terraform/python/eks && terraform init && terraform validate"
+         cleanup: "rm -rf .terraform && rm -rf .terraform.lock.hcl"

      - name: Deploy sample app via terraform and wait for the endpoint to come online
        id: deploy-python-app
        working-directory: terraform/python/eks
        run: |
-         terraform init
-         terraform validate
-
          # Attempt to deploy the sample app on an EKS instance and wait for its endpoint to come online.
          # There may be occasional failures due to transitivity issues, so try up to 2 times.
          # deployment_failed of 0 indicates that both the terraform deployment and the endpoint are running, while 1 indicates
@@ -163,26 +166,23 @@ jobs:
          # If the deployment_failed is still 0, then the terraform deployment succeeded and now try to connect to the endpoint
          # after installing App Signals. Attempts to connect will be made for up to 10 minutes
          if [ $deployment_failed -eq 0 ]; then
-           kubectl wait --for=condition=Ready pod --all -n ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }}
-
            echo "Installing app signals to the sample app"
            source ${GITHUB_WORKSPACE}/.github/workflows/util/execute_and_retry.sh
            execute_and_retry 2 \
-             "${GITHUB_WORKSPACE}/enablement-script/scripts/eks/appsignals/enable-app-signals.sh \
+             "${GITHUB_WORKSPACE}/enablement-script/enable-app-signals.sh \
              ${{ inputs.test-cluster-name }} \
              ${{ inputs.aws-region }} \
              ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }}" \
-             "${GITHUB_WORKSPACE}/enablement-script/scripts/eks/appsignals/clean-app-signals.sh \
+             "${GITHUB_WORKSPACE}/enablement-script/clean-app-signals.sh \
              ${{ inputs.test-cluster-name }} \
              ${{ inputs.aws-region }} \
              ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }} && \
              aws eks update-kubeconfig --name ${{ inputs.test-cluster-name }} --region ${{ inputs.aws-region }}"

-
            execute_and_retry 2 "kubectl delete pods --all -n ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }}"
            execute_and_retry 2 "kubectl wait --for=condition=Ready --request-timeout '5m' pod --all -n ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }}"

-           echo "Attempting to connect to the endpoint"
+           echo "Attempting to connect to the main sample app endpoint"
            python_app_endpoint=http://$(terraform output python_app_endpoint)
            attempt_counter=0
            max_attempts=60
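The loop that consumes attempt_counter and max_attempts sits below this hunk and is not shown in the excerpt. A typical shape for it, assuming a curl-based readiness probe against $python_app_endpoint with a 10-second wait between attempts (both assumptions, consistent with the "up to 10 minutes" comment above), would be:

# Sketch only - the real loop body is outside this hunk
while [ $attempt_counter -lt $max_attempts ]; do
  if curl --silent --fail --output /dev/null "$python_app_endpoint"; then
    echo "Endpoint is reachable"
    break
  fi
  attempt_counter=$((attempt_counter + 1))
  echo "Endpoint not ready (attempt $attempt_counter of $max_attempts); retrying in 10 seconds"
  sleep 10
done
if [ $attempt_counter -eq $max_attempts ]; then
  deployment_failed=1
fi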
@@ -203,9 +203,13 @@ jobs:
          # resources created from terraform and try again.
          if [ $deployment_failed -eq 1 ]; then
            echo "Cleaning up App Signal"
+           ${GITHUB_WORKSPACE}/enablement-script/clean-app-signals.sh \
+             ${{ inputs.test-cluster-name }} \
+             ${{ inputs.aws-region }} \
+             ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }}

-           cd ${{ github.workspace }}/amazon-cloudwatch-agent-operator/
-           kubectl delete -f ./namespace.yaml
+           # Running clean-app-signals.sh removes the current cluster from the kubeconfig. Update the kubeconfig again for subsequent runs.
+           aws eks update-kubeconfig --name ${{ inputs.test-cluster-name }} --region ${{ inputs.aws-region }}

            echo "Destroying terraform"
            terraform destroy -auto-approve \
@@ -231,20 +235,6 @@ jobs:
            fi
          done

-         # Attach policies to cluster node group roles that are required for AppSignals
-         aws eks list-nodegroups --cluster-name ${{ inputs.test-cluster-name }} --region ${{ inputs.aws-region }} |\
-         jq -r '.nodegroups[]' |\
-         while read -r node_group;
-         do
-           node_role=$(\
-             aws eks describe-nodegroup --cluster-name ${{ inputs.test-cluster-name }} --nodegroup-name $node_group --region ${{ inputs.aws-region }} |\
-             jq -r '.nodegroup.nodeRole' |\
-             cut -d'/' -f2
-           )
-           aws iam attach-role-policy --role-name $node_role --policy-arn arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy --region ${{ inputs.aws-region }}
-           aws iam attach-role-policy --role-name $node_role --policy-arn arn:aws:iam::aws:policy/AWSXRayWriteOnlyAccess --region ${{ inputs.aws-region }}
-         done
-
      - name: Get remote service pod name and IP
        run: |
          echo "REMOTE_SERVICE_DEPLOYMENT_NAME=$(kubectl get deployments -n ${{ env.PYTHON_SAMPLE_APP_NAMESPACE }} --selector=app=remote-app -o jsonpath='{.items[0].metadata.name}')" >> $GITHUB_ENV
@@ -348,7 +338,7 @@ jobs:
      - name: Clean Up App Signals
        if: always()
        continue-on-error: true
-       working-directory: enablement-script/scripts/eks/appsignals
+       working-directory: enablement-script
        run: |
          ./clean-app-signals.sh \
            ${{ inputs.test-cluster-name }} \