Skip to content

Commit 3b55f23

Browse files
committed
Set blue green defaults null to use provider default
1 parent 4a8f6ec commit 3b55f23

File tree

21 files changed

+53
-257
lines changed

21 files changed

+53
-257
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ The node_pools variable takes the following parameters:
284284
| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional |
285285
| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional |
286286
| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional |
287-
| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
287+
| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
288288
| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
289289
| name | The name of the node pool | | Required |
290290
| node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required |

autogen/main/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ The node_pools variable takes the following parameters:
220220
| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional |
221221
| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional |
222222
| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional |
223-
| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
223+
| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
224224
| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
225225
| name | The name of the node pool | | Required |
226226
{% if beta_cluster %}

autogen/main/cluster.tf.tmpl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -692,12 +692,12 @@ resource "google_container_node_pool" "windows_pools" {
692692
dynamic "blue_green_settings" {
693693
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
694694
content {
695-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
695+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
696696

697697
standard_rollout_policy {
698-
batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
698+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
699699
batch_percentage = lookup(each.value, "batch_percentage", null)
700-
batch_node_count = lookup(each.value, "batch_node_count", 1)
700+
batch_node_count = lookup(each.value, "batch_node_count", null)
701701
}
702702
}
703703
}

autogen/main/variables.tf.tmpl

Lines changed: 0 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -756,41 +756,5 @@ variable "strategy" {
756756
description = "The upgrade strategy to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)"
757757
default = "SURGE"
758758
}
759-
760-
variable "max_surge" {
761-
type = number
762-
description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)"
763-
default = null
764-
}
765-
766-
variable "max_unavailable" {
767-
type = number
768-
description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)"
769-
default = null
770-
}
771-
772-
variable "node_pool_soak_duration" {
773-
type = string
774-
description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)"
775-
default = "3600s"
776-
}
777-
778-
variable "batch_soak_duration" {
779-
type = string
780-
description = "Soak time after each batch gets drained (Optional)"
781-
default = "0s"
782-
}
783-
784-
variable "batch_percentage" {
785-
type = string
786-
description = "Percentage of the blue pool nodes to drain in a batch (Optional)"
787-
default = null
788-
}
789-
790-
variable "batch_node_count" {
791-
type = number
792-
description = "The number of blue nodes to drain in a batch (Optional)"
793-
default = null
794-
}
795759
{% endif %}
796760
{% endif %}

cluster.tf

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -413,12 +413,12 @@ resource "google_container_node_pool" "pools" {
413413
dynamic "blue_green_settings" {
414414
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
415415
content {
416-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
416+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
417417

418418
standard_rollout_policy {
419-
batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
419+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
420420
batch_percentage = lookup(each.value, "batch_percentage", null)
421-
batch_node_count = lookup(each.value, "batch_node_count", 1)
421+
batch_node_count = lookup(each.value, "batch_node_count", null)
422422
}
423423
}
424424
}
@@ -599,12 +599,12 @@ resource "google_container_node_pool" "windows_pools" {
599599
dynamic "blue_green_settings" {
600600
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
601601
content {
602-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
602+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
603603

604604
standard_rollout_policy {
605-
batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
605+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
606606
batch_percentage = lookup(each.value, "batch_percentage", null)
607-
batch_node_count = lookup(each.value, "batch_node_count", 1)
607+
batch_node_count = lookup(each.value, "batch_node_count", null)
608608
}
609609
}
610610
}

modules/beta-private-cluster-update-variant/README.md

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -163,9 +163,6 @@ Then perform the following commands on the root folder:
163163
| add\_master\_webhook\_firewall\_rules | Create master\_webhook firewall rules for ports defined in `firewall_inbound_ports` | `bool` | `false` | no |
164164
| add\_shadow\_firewall\_rules | Create GKE shadow firewall (the same as default firewall rules with firewall logs enabled). | `bool` | `false` | no |
165165
| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | `string` | `null` | no |
166-
| batch\_node\_count | The number of blue nodes to drain in a batch (Optional) | `number` | `null` | no |
167-
| batch\_percentage | Percentage of the blue pool nodes to drain in a batch (Optional) | `string` | `null` | no |
168-
| batch\_soak\_duration | Soak time after each batch gets drained (Optionial) | `string` | `"0s"` | no |
169166
| cloudrun | (Beta) Enable CloudRun addon | `bool` | `false` | no |
170167
| cloudrun\_load\_balancer\_type | (Beta) Configure the Cloud Run load balancer type. External by default. Set to `LOAD_BALANCER_TYPE_INTERNAL` to configure as an internal load balancer. | `string` | `""` | no |
171168
| cluster\_autoscaling | Cluster autoscaling configuration. See [more details](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#clusterautoscaling) | <pre>object({<br> enabled = bool<br> autoscaling_profile = string<br> min_cpu_cores = number<br> max_cpu_cores = number<br> min_memory_gb = number<br> max_memory_gb = number<br> gpu_resources = list(object({ resource_type = string, minimum = number, maximum = number }))<br> auto_repair = bool<br> auto_upgrade = bool<br> })</pre> | <pre>{<br> "auto_repair": true,<br> "auto_upgrade": true,<br> "autoscaling_profile": "BALANCED",<br> "enabled": false,<br> "gpu_resources": [],<br> "max_cpu_cores": 0,<br> "max_memory_gb": 0,<br> "min_cpu_cores": 0,<br> "min_memory_gb": 0<br>}</pre> | no |
@@ -230,8 +227,6 @@ Then perform the following commands on the root folder:
230227
| master\_authorized\_networks | List of master authorized networks. If none are provided, disallow external access (except the cluster node IPs, which GKE automatically whitelists). | `list(object({ cidr_block = string, display_name = string }))` | `[]` | no |
231228
| master\_global\_access\_enabled | Whether the cluster master is accessible globally (from any region) or only within the same region as the private endpoint. | `bool` | `true` | no |
232229
| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | `string` | `"10.0.0.0/28"` | no |
233-
| max\_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max\_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional) | `number` | `null` | no |
234-
| max\_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max\_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional) | `number` | `null` | no |
235230
| monitoring\_enable\_managed\_prometheus | Configuration for Managed Service for Prometheus. Whether or not the managed collection is enabled. | `bool` | `false` | no |
236231
| monitoring\_enabled\_components | List of services to monitor: SYSTEM\_COMPONENTS, WORKLOADS (provider version >= 3.89.0). Empty list is default GKE configuration. | `list(string)` | `[]` | no |
237232
| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | `string` | `"monitoring.googleapis.com/kubernetes"` | no |
@@ -241,7 +236,6 @@ Then perform the following commands on the root folder:
241236
| network\_policy\_provider | The network policy provider. | `string` | `"CALICO"` | no |
242237
| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | `string` | `""` | no |
243238
| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | `string` | `"GKE_METADATA"` | no |
244-
| node\_pool\_soak\_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional) | `string` | `"3600s"` | no |
245239
| node\_pools | List of maps containing node pools | `list(map(any))` | <pre>[<br> {<br> "name": "default-node-pool"<br> }<br>]</pre> | no |
246240
| node\_pools\_labels | Map of maps containing node labels by node-pool name | `map(map(string))` | <pre>{<br> "all": {},<br> "default-node-pool": {}<br>}</pre> | no |
247241
| node\_pools\_linux\_node\_configs\_sysctls | Map of maps containing linux node config sysctls by node-pool name | `map(map(string))` | <pre>{<br> "all": {},<br> "default-node-pool": {}<br>}</pre> | no |
@@ -356,7 +350,7 @@ The node_pools variable takes the following parameters:
356350
| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional |
357351
| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional |
358352
| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional |
359-
| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
353+
| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
360354
| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
361355
| name | The name of the node pool | | Required |
362356
| placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |

modules/beta-private-cluster-update-variant/cluster.tf

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -602,12 +602,12 @@ resource "google_container_node_pool" "pools" {
602602
dynamic "blue_green_settings" {
603603
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
604604
content {
605-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
605+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
606606

607607
standard_rollout_policy {
608-
batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
608+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
609609
batch_percentage = lookup(each.value, "batch_percentage", null)
610-
batch_node_count = lookup(each.value, "batch_node_count", 1)
610+
batch_node_count = lookup(each.value, "batch_node_count", null)
611611
}
612612
}
613613
}
@@ -828,12 +828,12 @@ resource "google_container_node_pool" "windows_pools" {
828828
dynamic "blue_green_settings" {
829829
for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
830830
content {
831-
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
831+
node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
832832

833833
standard_rollout_policy {
834-
batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
834+
batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
835835
batch_percentage = lookup(each.value, "batch_percentage", null)
836-
batch_node_count = lookup(each.value, "batch_node_count", 1)
836+
batch_node_count = lookup(each.value, "batch_node_count", null)
837837
}
838838
}
839839
}

modules/beta-private-cluster-update-variant/variables.tf

Lines changed: 0 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -726,39 +726,3 @@ variable "strategy" {
726726
description = "The upgrade strategy to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)"
727727
default = "SURGE"
728728
}
729-
730-
variable "max_surge" {
731-
type = number
732-
description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)"
733-
default = null
734-
}
735-
736-
variable "max_unavailable" {
737-
type = number
738-
description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)"
739-
default = null
740-
}
741-
742-
variable "node_pool_soak_duration" {
743-
type = string
744-
description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)"
745-
default = "3600s"
746-
}
747-
748-
variable "batch_soak_duration" {
749-
type = string
750-
description = "Soak time after each batch gets drained (Optional)"
751-
default = "0s"
752-
}
753-
754-
variable "batch_percentage" {
755-
type = string
756-
description = "Percentage of the blue pool nodes to drain in a batch (Optional)"
757-
default = null
758-
}
759-
760-
variable "batch_node_count" {
761-
type = number
762-
description = "The number of blue nodes to drain in a batch (Optional)"
763-
default = null
764-
}

0 commit comments

Comments
 (0)