Skip to content

Commit 28ccece

Browse files
authored
fix: Disable creation of cluster security group rules that map to node security group when create_node_security_group = false (#2274)
* fix: Disable creation of cluster security group rules that map to node security group when `create_node_security_group` = `false` * feat: Update Fargate example to run only Fargate and show disabling of both cluster and node security groups * fix: Ensure CoreDNS changes are made ahead of install
1 parent 8dc5ad4 commit 28ccece

File tree

5 files changed

+171
-53
lines changed

5 files changed

+171
-53
lines changed

docs/faq.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ By default, EKS creates a cluster primary security group that is created outside
2626
attach_cluster_primary_security_group = true # default is false
2727
2828
node_security_group_tags = {
29-
"kubernetes.io/cluster/<CLUSTER_NAME>" = "" # or any other value other than "owned"
29+
"kubernetes.io/cluster/<CLUSTER_NAME>" = null # or any other value other than "owned"
3030
}
3131
```
3232

@@ -36,7 +36,7 @@ By default, EKS creates a cluster primary security group that is created outside
3636
attach_cluster_primary_security_group = true # default is false
3737
3838
cluster_tags = {
39-
"kubernetes.io/cluster/<CLUSTER_NAME>" = "" # or any other value other than "owned"
39+
"kubernetes.io/cluster/<CLUSTER_NAME>" = null # or any other value other than "owned"
4040
}
4141
```
4242

examples/fargate_profile/README.md

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,16 @@ Note that this example may create resources which cost money. Run `terraform des
2121
|------|---------|
2222
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
2323
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
24-
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
24+
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.7 |
25+
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
2526

2627
## Providers
2728

2829
| Name | Version |
2930
|------|---------|
3031
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
32+
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.7 |
33+
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
3134

3235
## Modules
3336

@@ -41,6 +44,11 @@ Note that this example may create resources which cost money. Run `terraform des
4144
| Name | Type |
4245
|------|------|
4346
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
47+
| [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
48+
| [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
49+
| [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
50+
| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
51+
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
4452

4553
## Inputs
4654

examples/fargate_profile/main.tf

Lines changed: 150 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,20 @@ provider "aws" {
22
region = local.region
33
}
44

5+
provider "helm" {
6+
kubernetes {
7+
host = module.eks.cluster_endpoint
8+
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
9+
10+
exec {
11+
api_version = "client.authentication.k8s.io/v1beta1"
12+
command = "aws"
13+
# This requires the awscli to be installed locally where Terraform is executed
14+
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
15+
}
16+
}
17+
}
18+
519
locals {
620
name = "ex-${replace(basename(path.cwd), "_", "-")}"
721
cluster_version = "1.22"
@@ -27,14 +41,8 @@ module "eks" {
2741
cluster_endpoint_public_access = true
2842

2943
cluster_addons = {
30-
# Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
31-
coredns = {
32-
resolve_conflicts = "OVERWRITE"
33-
}
3444
kube-proxy = {}
35-
vpc-cni = {
36-
resolve_conflicts = "OVERWRITE"
37-
}
45+
vpc-cni = {}
3846
}
3947

4048
cluster_encryption_config = [{
@@ -45,28 +53,13 @@ module "eks" {
4553
vpc_id = module.vpc.vpc_id
4654
subnet_ids = module.vpc.private_subnets
4755

48-
# You require a node group to schedule coredns which is critical for running correctly internal DNS.
49-
# If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
50-
# available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
51-
eks_managed_node_groups = {
52-
example = {
53-
desired_size = 1
54-
55-
instance_types = ["t3.large"]
56-
labels = {
57-
Example = "managed_node_groups"
58-
GithubRepo = "terraform-aws-eks"
59-
GithubOrg = "terraform-aws-modules"
60-
}
61-
tags = {
62-
ExtraTag = "example"
63-
}
64-
}
65-
}
56+
# Fargate profiles use the cluster primary security group so these are not utilized
57+
create_cluster_security_group = false
58+
create_node_security_group = false
6659

6760
fargate_profiles = {
68-
default = {
69-
name = "default"
61+
example = {
62+
name = "example"
7063
selectors = [
7164
{
7265
namespace = "backend"
@@ -75,15 +68,18 @@ module "eks" {
7568
}
7669
},
7770
{
78-
namespace = "default"
71+
namespace = "app-*"
7972
labels = {
80-
WorkerType = "fargate"
73+
Application = "app-wildcard"
8174
}
8275
}
8376
]
8477

78+
# Using specific subnets instead of the subnets supplied for the cluster itself
79+
subnet_ids = [module.vpc.private_subnets[1]]
80+
8581
tags = {
86-
Owner = "default"
82+
Owner = "secondary"
8783
}
8884

8985
timeouts = {
@@ -92,29 +88,138 @@ module "eks" {
9288
}
9389
}
9490

95-
secondary = {
96-
name = "secondary"
91+
kube_system = {
92+
name = "kube-system"
9793
selectors = [
98-
{
99-
namespace = "default"
100-
labels = {
101-
Environment = "test"
102-
GithubRepo = "terraform-aws-eks"
103-
GithubOrg = "terraform-aws-modules"
104-
}
105-
}
94+
{ namespace = "kube-system" }
10695
]
96+
}
97+
}
10798

108-
# Using specific subnets instead of the subnets supplied for the cluster itself
109-
subnet_ids = [module.vpc.private_subnets[1]]
99+
tags = local.tags
100+
}
110101

111-
tags = {
112-
Owner = "secondary"
102+
################################################################################
103+
# Modify EKS CoreDNS Deployment
104+
################################################################################
105+
106+
data "aws_eks_cluster_auth" "this" {
107+
name = module.eks.cluster_id
108+
}
109+
110+
locals {
111+
kubeconfig = yamlencode({
112+
apiVersion = "v1"
113+
kind = "Config"
114+
current-context = "terraform"
115+
clusters = [{
116+
name = module.eks.cluster_id
117+
cluster = {
118+
certificate-authority-data = module.eks.cluster_certificate_authority_data
119+
server = module.eks.cluster_endpoint
120+
}
121+
}]
122+
contexts = [{
123+
name = "terraform"
124+
context = {
125+
cluster = module.eks.cluster_id
126+
user = "terraform"
113127
}
128+
}]
129+
users = [{
130+
name = "terraform"
131+
user = {
132+
token = data.aws_eks_cluster_auth.this.token
133+
}
134+
}]
135+
})
136+
}
137+
138+
# Separate resource so that this is only ever executed once
139+
resource "null_resource" "remove_default_coredns_deployment" {
140+
triggers = {}
141+
142+
provisioner "local-exec" {
143+
interpreter = ["/bin/bash", "-c"]
144+
environment = {
145+
KUBECONFIG = base64encode(local.kubeconfig)
114146
}
147+
148+
# We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
149+
# However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
150+
command = <<-EOT
151+
kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
152+
EOT
115153
}
154+
}
116155

117-
tags = local.tags
156+
resource "null_resource" "modify_kube_dns" {
157+
triggers = {}
158+
159+
provisioner "local-exec" {
160+
interpreter = ["/bin/bash", "-c"]
161+
environment = {
162+
KUBECONFIG = base64encode(local.kubeconfig)
163+
}
164+
165+
# We are maintaining the existing kube-dns service and annotating it for Helm to assume control
166+
command = <<-EOT
167+
echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
168+
kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
169+
kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
170+
kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
171+
EOT
172+
}
173+
174+
depends_on = [
175+
null_resource.remove_default_coredns_deployment
176+
]
177+
}
178+
179+
################################################################################
180+
# CoreDNS Helm Chart (self-managed)
181+
################################################################################
182+
183+
data "aws_eks_addon_version" "this" {
184+
for_each = toset(["coredns"])
185+
186+
addon_name = each.value
187+
kubernetes_version = module.eks.cluster_version
188+
most_recent = true
189+
}
190+
191+
resource "helm_release" "coredns" {
192+
name = "coredns"
193+
namespace = "kube-system"
194+
create_namespace = false
195+
description = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
196+
chart = "coredns"
197+
version = "1.19.4"
198+
repository = "https://coredns.github.io/helm"
199+
200+
# For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
201+
values = [
202+
<<-EOT
203+
image:
204+
repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
205+
tag: ${data.aws_eks_addon_version.this["coredns"].version}
206+
deployment:
207+
name: coredns
208+
annotations:
209+
eks.amazonaws.com/compute-type: fargate
210+
service:
211+
name: kube-dns
212+
annotations:
213+
eks.amazonaws.com/compute-type: fargate
214+
podAnnotations:
215+
eks.amazonaws.com/compute-type: fargate
216+
EOT
217+
]
218+
219+
depends_on = [
220+
# Need to ensure the CoreDNS updates are performed before provisioning
221+
null_resource.modify_kube_dns
222+
]
118223
}
119224

120225
################################################################################

examples/fargate_profile/versions.tf

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,13 @@ terraform {
66
source = "hashicorp/aws"
77
version = ">= 3.72"
88
}
9-
kubernetes = {
10-
source = "hashicorp/kubernetes"
11-
version = ">= 2.10"
9+
helm = {
10+
source = "hashicorp/helm"
11+
version = ">= 2.7"
12+
}
13+
null = {
14+
source = "hashicorp/null"
15+
version = ">= 3.0"
1216
}
1317
}
1418
}

main.tf

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,8 @@ locals {
128128

129129
cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id
130130

131-
cluster_security_group_rules = {
131+
# Do not add rules to node security group if the module is not creating it
132+
cluster_security_group_rules = local.create_node_sg ? {
132133
ingress_nodes_443 = {
133134
description = "Node groups to cluster API"
134135
protocol = "tcp"
@@ -153,7 +154,7 @@ locals {
153154
type = "egress"
154155
source_node_security_group = true
155156
}
156-
}
157+
} : {}
157158
}
158159

159160
resource "aws_security_group" "cluster" {

0 commit comments

Comments
 (0)