@@ -2,6 +2,20 @@ provider "aws" {
   region = local.region
 }
 
+provider "helm" {
+  kubernetes {
+    host                   = module.eks.cluster_endpoint
+    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+    exec {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "aws"
+      # This requires the awscli to be installed locally where Terraform is executed
+      args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
+    }
+  }
+}
+
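+# As a quick sanity check, the token the exec plugin above fetches can also be
+# produced by hand (cluster name is a placeholder):
+#   aws eks get-token --cluster-name <cluster_name>
+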
 locals {
   name            = "ex-${replace(basename(path.cwd), "_", "-")}"
   cluster_version = "1.22"
@@ -27,14 +41,8 @@ module "eks" {
   cluster_endpoint_public_access = true
 
   cluster_addons = {
-    # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
-    coredns = {
-      resolve_conflicts = "OVERWRITE"
-    }
     kube-proxy = {}
-    vpc-cni = {
-      resolve_conflicts = "OVERWRITE"
-    }
+    vpc-cni    = {}
   }
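+
+  # NOTE: CoreDNS is intentionally absent from cluster_addons; the EKS-managed
+  # deployment is removed further below and replaced with a self-managed Helm
+  # release so that it can be scheduled onto Fargate.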
 
   cluster_encryption_config = [{
@@ -45,28 +53,13 @@ module "eks" {
   vpc_id     = module.vpc.vpc_id
   subnet_ids = module.vpc.private_subnets
 
-  # You require a node group to schedule coredns which is critical for running correctly internal DNS.
-  # If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
-  # available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
-  eks_managed_node_groups = {
-    example = {
-      desired_size = 1
-
-      instance_types = ["t3.large"]
-      labels = {
-        Example    = "managed_node_groups"
-        GithubRepo = "terraform-aws-eks"
-        GithubOrg  = "terraform-aws-modules"
-      }
-      tags = {
-        ExtraTag = "example"
-      }
-    }
-  }
+  # Fargate profiles use the cluster primary security group so these are not utilized
+  create_cluster_security_group = false
+  create_node_security_group    = false
 
   fargate_profiles = {
-    default = {
-      name = "default"
+    example = {
+      name = "example"
       selectors = [
         {
           namespace = "backend"
@@ -75,15 +68,18 @@ module "eks" {
           }
         },
         {
-          namespace = "default"
+          namespace = "app-*"
           labels = {
-            WorkerType = "fargate"
+            Application = "app-wildcard"
           }
         }
       ]
 
+      # Using specific subnets instead of the subnets supplied for the cluster itself
+      subnet_ids = [module.vpc.private_subnets[1]]
+
       tags = {
-        Owner = "default"
+        Owner = "secondary"
       }
 
       timeouts = {
@@ -92,29 +88,138 @@ module "eks" {
       }
     }
 
-    secondary = {
-      name = "secondary"
+    kube_system = {
+      name = "kube-system"
       selectors = [
-        {
-          namespace = "default"
-          labels = {
-            Environment = "test"
-            GithubRepo  = "terraform-aws-eks"
-            GithubOrg   = "terraform-aws-modules"
-          }
-        }
+        { namespace = "kube-system" }
       ]
+    }
+  }
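+
+  # The kube_system profile above is what allows the replacement CoreDNS pods
+  # (installed below via Helm) to be scheduled onto Fargate.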
 
-      # Using specific subnets instead of the subnets supplied for the cluster itself
-      subnet_ids = [module.vpc.private_subnets[1]]
+  tags = local.tags
+}
 
-      tags = {
-        Owner = "secondary"
+################################################################################
+# Modify EKS CoreDNS Deployment
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_id
+}
+
+locals {
+  kubeconfig = yamlencode({
+    apiVersion      = "v1"
+    kind            = "Config"
+    current-context = "terraform"
+    clusters = [{
+      name = module.eks.cluster_id
+      cluster = {
+        certificate-authority-data = module.eks.cluster_certificate_authority_data
+        server                     = module.eks.cluster_endpoint
+      }
+    }]
+    contexts = [{
+      name = "terraform"
+      context = {
+        cluster = module.eks.cluster_id
+        user    = "terraform"
       }
+    }]
+    users = [{
+      name = "terraform"
+      user = {
+        token = data.aws_eks_cluster_auth.this.token
+      }
+    }]
+  })
+}
+
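+# For reference, local.kubeconfig above renders to YAML roughly like the
+# following (values replaced with placeholders):
+#
+#   apiVersion: v1
+#   kind: Config
+#   current-context: terraform
+#   clusters:
+#     - name: <cluster_id>
+#       cluster:
+#         certificate-authority-data: <base64-encoded CA data>
+#         server: <cluster endpoint URL>
+#   contexts:
+#     - name: terraform
+#       context:
+#         cluster: <cluster_id>
+#         user: terraform
+#   users:
+#     - name: terraform
+#       user:
+#         token: <short-lived auth token>
+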
+# Separate resource so that this is only ever executed once
+resource "null_resource" "remove_default_coredns_deployment" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
     }
+
+    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
+    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
   }
+}
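+
+# A minimal manual check that the provisioner worked, assuming kubectl is
+# configured against this cluster:
+#   kubectl --namespace kube-system get deployment coredns
+# should report NotFound once the default deployment has been removed.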
 
-  tags = local.tags
+resource "null_resource" "modify_kube_dns" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+
+  depends_on = [
+    null_resource.remove_default_coredns_deployment
+  ]
+}
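+
+# Why the annotations/label above: Helm 3 will only take ownership of an
+# existing object if it carries the meta.helm.sh/release-name and
+# meta.helm.sh/release-namespace annotations and the
+# app.kubernetes.io/managed-by=Helm label. To inspect the result by hand
+# (assumes kubectl access):
+#   kubectl --namespace kube-system get service kube-dns -o yaml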
+
+################################################################################
+# CoreDNS Helm Chart (self-managed)
+################################################################################
+
+data "aws_eks_addon_version" "this" {
+  for_each = toset(["coredns"])
+
+  addon_name         = each.value
+  kubernetes_version = module.eks.cluster_version
+  most_recent        = true
+}
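+
+# Roughly the CLI equivalent of the data source above (illustrative only):
+#   aws eks describe-addon-versions --addon-name coredns --kubernetes-version 1.22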
+
+resource "helm_release" "coredns" {
+  name             = "coredns"
+  namespace        = "kube-system"
+  create_namespace = false
+  description      = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
+  chart            = "coredns"
+  version          = "1.19.4"
+  repository       = "https://coredns.github.io/helm"
+
+  # For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
+  values = [
+    <<-EOT
+      image:
+        repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
+        tag: ${data.aws_eks_addon_version.this["coredns"].version}
+      deployment:
+        name: coredns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      service:
+        name: kube-dns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      podAnnotations:
+        eks.amazonaws.com/compute-type: fargate
+    EOT
+  ]
+
+  depends_on = [
+    # Need to ensure the CoreDNS updates are performed before provisioning
+    null_resource.modify_kube_dns
+  ]
 }
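+
+# Once the release converges, the CoreDNS pods should land on Fargate; a quick
+# check (assumes kubectl access) is:
+#   kubectl --namespace kube-system get pods -o wide
+# where the NODE column for the coredns pods should show fargate-ip-* nodes.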
 
 ################################################################################