
Commit da4d55e

Sh4d1 and remyleone authored
feat(k8s): add cluster and pool datasource (#724)
Signed-off-by: Patrik Cyvoct <[email protected]> Co-authored-by: Rémy Léone <[email protected]>
1 parent cfdc97c commit da4d55e

9 files changed: +5199 −0 lines

docs/data-sources/k8s_cluster.md

Lines changed: 108 additions & 0 deletions
---
page_title: "Scaleway: scaleway_k8s_cluster"
description: |-
  Gets information about a Kubernetes Cluster.
---

# scaleway_k8s_cluster

Gets information about a Kubernetes Cluster.

## Example Usage

```hcl
# Get info by cluster name
data "scaleway_k8s_cluster" "by_name" {
  name = "my-cluster-name"
}

# Get info by cluster ID
data "scaleway_k8s_cluster" "by_id" {
  cluster_id = "11111111-1111-1111-1111-111111111111"
}
```

## Argument Reference

- `name` - (Optional) The cluster name. Only one of `name` and `cluster_id` should be specified.

- `cluster_id` - (Optional) The cluster ID. Only one of `name` and `cluster_id` should be specified.

- `region` - (Defaults to [provider](../index.md#region) `region`) The [region](../guides/regions_and_zones.md#regions) in which the cluster exists.

## Attributes Reference

In addition to all arguments above, the following attributes are exported:

- `id` - The ID of the cluster.

- `created_at` - The creation date of the cluster.

- `updated_at` - The last update date of the cluster.

- `apiserver_url` - The URL of the Kubernetes API server.

- `wildcard_dns` - The DNS wildcard that points to all ready nodes.

- `kubeconfig` - The kubeconfig configuration of the cluster:

  - `config_file` - The raw kubeconfig file.

  - `host` - The URL of the Kubernetes API server.

  - `cluster_ca_certificate` - The CA certificate of the Kubernetes API server.

  - `token` - The token to connect to the Kubernetes API server.

- `status` - The status of the Kubernetes cluster.

- `upgrade_available` - True if a newer Kubernetes version is available.

- `description` - A description for the Kubernetes cluster.

- `version` - The version of the Kubernetes cluster.

- `cni` - The Container Network Interface (CNI) of the Kubernetes cluster.

- `enable_dashboard` - True if the [Kubernetes dashboard](https://github.com/kubernetes/dashboard) is enabled for the Kubernetes cluster.

- `ingress` - The [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) deployed on the Kubernetes cluster.

- `tags` - The tags associated with the Kubernetes cluster.

- `autoscaler_config` - The configuration options for the [Kubernetes cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler):

  - `disable_scale_down` - True if the scale-down feature of the autoscaler is disabled.

  - `scale_down_delay_after_add` - The duration after scale-up before scale-down evaluation resumes.

  - `scale_down_unneeded_time` - The duration a node should be unneeded before it is eligible for scale-down.

  - `estimator` - The type of resource estimator used in scale-up.

  - `expander` - The type of node group expander used in scale-up.

  - `ignore_daemonsets_utilization` - True if DaemonSet pods are ignored when calculating resource utilization for scale-down.

  - `balance_similar_node_groups` - True if detecting similar node groups and balancing the number of nodes between them is enabled.

  - `expendable_pods_priority_cutoff` - Pods with priority below this cutoff are expendable: they can be killed without any consideration during scale-down and do not cause scale-up. Pods with null priority (PodPriority disabled) are non-expendable.

- `auto_upgrade` - The auto-upgrade configuration:

  - `enable` - True if Kubernetes patch-version auto upgrades are enabled.

  - `maintenance_window_start_hour` - The start hour (UTC) of the 2-hour auto-upgrade maintenance window (0 to 23).

  - `maintenance_window_day` - The day of the auto-upgrade maintenance window (`monday` to `sunday`, or `any`).

- `feature_gates` - The list of [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/) enabled on the cluster.

- `admission_plugins` - The list of [admission plugins](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) enabled on the cluster.

- `region` - The [region](../guides/regions_and_zones.md#regions) in which the cluster exists.

- `organization_id` - The ID of the organization the cluster is associated with.

- `project_id` - The ID of the project the cluster is associated with.
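As an illustration (not part of this commit), the exported `kubeconfig` attributes can feed a Kubernetes provider configuration. This is a minimal sketch, assuming the `by_name` data source from the example above and the official `hashicorp/kubernetes` provider:

```hcl
# Sketch: configure the Kubernetes provider from the exported kubeconfig.
# Assumes kubeconfig is exported as a single-element list, hence the [0] index.
provider "kubernetes" {
  host  = data.scaleway_k8s_cluster.by_name.kubeconfig[0].host
  token = data.scaleway_k8s_cluster.by_name.kubeconfig[0].token
  cluster_ca_certificate = base64decode(
    data.scaleway_k8s_cluster.by_name.kubeconfig[0].cluster_ca_certificate
  )
}
```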

docs/data-sources/k8s_pool.md

Lines changed: 78 additions & 0 deletions
---
page_title: "Scaleway: scaleway_k8s_pool"
description: |-
  Gets information about a Kubernetes Cluster's Pool.
---

# scaleway_k8s_pool

Gets information about a Kubernetes Cluster's Pool.

## Example Usage

```hcl
# Get info by pool name (cluster_id is required)
data "scaleway_k8s_pool" "by_name" {
  name       = "my-pool-name"
  cluster_id = "11111111-1111-1111-1111-111111111111"
}

# Get info by pool ID
data "scaleway_k8s_pool" "by_id" {
  pool_id = "11111111-1111-1111-1111-111111111111"
}
```

## Argument Reference

- `name` - (Optional) The pool name. Only one of `name` and `pool_id` should be specified. `cluster_id` should be specified with `name`.

- `pool_id` - (Optional) The pool's ID. Only one of `name` and `pool_id` should be specified.

- `cluster_id` - (Optional) The cluster ID. Required when `name` is set.

- `region` - (Defaults to [provider](../index.md#region) `region`) The [region](../guides/regions_and_zones.md#regions) in which the pool exists.

## Attributes Reference

In addition to all arguments above, the following attributes are exported:

- `id` - The ID of the pool.

- `status` - The status of the pool.

- `nodes` - The list of nodes in the pool:

  - `name` - The name of the node.

  - `public_ip` - The public IPv4 address of the node.

  - `public_ip_v6` - The public IPv6 address of the node.

  - `status` - The status of the node.

- `created_at` - The creation date of the pool.

- `updated_at` - The last update date of the pool.

- `version` - The version of the pool.

- `current_size` - The size of the pool at the time the Terraform state was updated.

- `node_type` - The commercial type of the pool instances.

- `size` - The size of the pool.

- `min_size` - The minimum size of the pool, used by the autoscaling feature.

- `max_size` - The maximum size of the pool, used by the autoscaling feature.

- `tags` - The tags associated with the pool.

- `placement_group_id` - The [placement group](https://developers.scaleway.com/en/products/instance/api/#placement-groups-d8f653) the nodes of the pool are attached to.

- `autoscaling` - True if the autoscaling feature is enabled for this pool.

- `autohealing` - True if the autohealing feature is enabled for this pool.

- `container_runtime` - The container runtime of the pool.
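As an illustration (not part of this commit), pool attributes can be surfaced as Terraform outputs. A minimal sketch, assuming the `by_id` data source from the example above:

```hcl
# Sketch: expose pool details as outputs built on the by_id data source.
output "pool_node_public_ips" {
  # nodes is exported as a list of objects; collect each node's public IPv4.
  value = [for node in data.scaleway_k8s_pool.by_id.nodes : node.public_ip]
}

output "pool_current_size" {
  value = data.scaleway_k8s_pool.by_id.current_size
}
```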

scaleway/data_source_k8s_cluster.go

Lines changed: 71 additions & 0 deletions
```go
package scaleway

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func dataSourceScalewayK8SCluster() *schema.Resource {
	// Generate datasource schema from the resource schema.
	dsSchema := datasourceSchemaFromResourceSchema(resourceScalewayK8SCluster().Schema)

	// Set 'Optional' schema elements.
	addOptionalFieldsToSchema(dsSchema, "name", "region")
	delete(dsSchema, "delete_additional_resources")

	dsSchema["name"].ConflictsWith = []string{"cluster_id"}
	dsSchema["cluster_id"] = &schema.Schema{
		Type:          schema.TypeString,
		Optional:      true,
		Description:   "The ID of the cluster",
		ValidateFunc:  validationUUIDorUUIDWithLocality(),
		ConflictsWith: []string{"name"},
	}

	return &schema.Resource{
		ReadContext: dataSourceScalewayK8SClusterRead,

		Schema: dsSchema,
	}
}

func dataSourceScalewayK8SClusterRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	meta := m.(*Meta)
	k8sAPI, region, err := k8sAPIWithRegion(d, meta)
	if err != nil {
		return diag.FromErr(err)
	}

	// When no cluster_id is given, resolve the cluster by listing clusters
	// and matching on the exact name; error on duplicates or no match.
	clusterID, ok := d.GetOk("cluster_id")
	if !ok {
		res, err := k8sAPI.ListClusters(&k8s.ListClustersRequest{
			Region:    region,
			Name:      expandStringPtr(d.Get("name")),
			ProjectID: expandStringPtr(d.Get("project_id")),
		}, scw.WithContext(ctx))
		if err != nil {
			return diag.FromErr(err)
		}
		for _, cluster := range res.Clusters {
			if cluster.Name == d.Get("name").(string) {
				if clusterID != "" {
					return diag.FromErr(fmt.Errorf("more than 1 cluster found with the same name %s", d.Get("name")))
				}
				clusterID = cluster.ID
			}
		}
		if clusterID == "" {
			return diag.FromErr(fmt.Errorf("no cluster found with the name %s", d.Get("name")))
		}
	}

	// Store the region-qualified ID and delegate the rest of the read to
	// the resource implementation.
	regionalizedID := datasourceNewRegionalizedID(clusterID, region)
	d.SetId(regionalizedID)
	_ = d.Set("cluster_id", regionalizedID)
	return resourceScalewayK8SClusterRead(ctx, d, m)
}
```
scaleway/data_source_k8s_cluster_test.go

Lines changed: 54 additions & 0 deletions
```go
package scaleway

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func TestAccScalewayDataSourceK8SCluster_Basic(t *testing.T) {
	tt := NewTestTools(t)
	defer tt.Cleanup()
	clusterName := "tf-cluster"
	version := testAccScalewayK8SClusterGetLatestK8SVersion(tt)
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:          func() { testAccPreCheck(t) },
		ProviderFactories: tt.ProviderFactories,
		CheckDestroy:      testAccCheckScalewayK8SClusterDestroy(tt),
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(`
resource "scaleway_k8s_cluster" "main" {
  name    = "%s"
  version = "%s"
  cni     = "cilium"
  tags    = [ "terraform-test", "data_scaleway_k8s_cluster", "basic" ]
}`, clusterName, version),
			},
			{
				Config: fmt.Sprintf(`
resource "scaleway_k8s_cluster" "main" {
  name    = "%s"
  version = "%s"
  cni     = "cilium"
  tags    = [ "terraform-test", "data_scaleway_k8s_cluster", "basic" ]
}

data "scaleway_k8s_cluster" "prod" {
  name = "${scaleway_k8s_cluster.main.name}"
}

data "scaleway_k8s_cluster" "stg" {
  cluster_id = "${scaleway_k8s_cluster.main.id}"
}`, clusterName, version),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckScalewayK8SClusterExists(tt, "data.scaleway_k8s_cluster.prod"),
					resource.TestCheckResourceAttr("data.scaleway_k8s_cluster.prod", "name", clusterName),
					testAccCheckScalewayK8SClusterExists(tt, "data.scaleway_k8s_cluster.stg"),
					resource.TestCheckResourceAttr("data.scaleway_k8s_cluster.stg", "name", clusterName),
				),
			},
		},
	})
}
```

scaleway/data_source_k8s_pool.go

Lines changed: 73 additions & 0 deletions
```go
package scaleway

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func dataSourceScalewayK8SPool() *schema.Resource {
	// Generate datasource schema from the resource schema.
	dsSchema := datasourceSchemaFromResourceSchema(resourceScalewayK8SPool().Schema)

	// Set 'Optional' schema elements.
	addOptionalFieldsToSchema(dsSchema, "name", "region", "cluster_id", "size")

	dsSchema["name"].ConflictsWith = []string{"pool_id"}
	dsSchema["cluster_id"].ConflictsWith = []string{"pool_id"}
	dsSchema["cluster_id"].RequiredWith = []string{"name"}
	dsSchema["pool_id"] = &schema.Schema{
		Type:          schema.TypeString,
		Optional:      true,
		Description:   "The ID of the pool",
		ValidateFunc:  validationUUIDorUUIDWithLocality(),
		ConflictsWith: []string{"name", "cluster_id"},
	}

	return &schema.Resource{
		ReadContext: dataSourceScalewayK8SPoolRead,

		Schema: dsSchema,
	}
}

func dataSourceScalewayK8SPoolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
	meta := m.(*Meta)
	k8sAPI, region, err := k8sAPIWithRegion(d, meta)
	if err != nil {
		return diag.FromErr(err)
	}

	// When no pool_id is given, resolve the pool by listing the cluster's
	// pools and matching on the exact name; error on duplicates or no match.
	poolID, ok := d.GetOk("pool_id")
	if !ok {
		clusterID := expandRegionalID(d.Get("cluster_id"))
		res, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{
			Region:    region,
			Name:      expandStringPtr(d.Get("name")),
			ClusterID: clusterID.ID,
		}, scw.WithContext(ctx))
		if err != nil {
			return diag.FromErr(err)
		}
		for _, pool := range res.Pools {
			if pool.Name == d.Get("name").(string) {
				if poolID != "" {
					return diag.FromErr(fmt.Errorf("more than 1 pool found with the same name %s", d.Get("name")))
				}
				poolID = pool.ID
			}
		}
		if poolID == "" {
			return diag.FromErr(fmt.Errorf("no pool found with the name %s", d.Get("name")))
		}
	}

	// Store the region-qualified ID and delegate the rest of the read to
	// the resource implementation.
	regionalizedID := datasourceNewRegionalizedID(poolID, region)
	d.SetId(regionalizedID)
	_ = d.Set("pool_id", regionalizedID)
	return resourceScalewayK8SPoolRead(ctx, d, m)
}
```
