Skip to content

fix(baremetal): refactor logic and debug RAID #3098

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
May 26, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,18 +1,25 @@
---
subcategory: "Elastic Metal"
page_title: "Scaleway: scaleway_baremetal_easy_partitioning"
page_title: "Scaleway: scaleway_baremetal_partition_schema"
---

# scaleway_baremetal_easy_partitioning
# scaleway_baremetal_partition_schema

The scaleway_easy_partitioning data source allows you to retrieve a ready-to-use partitioning schema for a BareMetal server. This schema can be used for custom installations with optional swap and extra partitions.
The scaleway_baremetal_partition_schema data source allows you to retrieve a ready-to-use partitioning schema for a BareMetal server. This schema can be used for custom installations with optional swap and extra partitions.

This data source simplifies the process of generating valid partitioning configurations, especially useful when dealing with OS and offer compatibility requirements.

## Partitioning Details

The partitioning schema generated by the `scaleway_baremetal_partition_schema` data source includes a root (`/`) partition that is **20GB** in size by default.

If additional storage is required, you can enable the `extra_partition` option to mount extra space on a custom path (e.g., `/data`).


## Example Usage

```hcl
data "scaleway_easy_partitioning" "default" {
data "scaleway_baremetal_partition_schema" "default" {
offer_id = "11111111-1111-1111-1111-111111111111"
os_id = "22222222-2222-2222-2222-222222222222"
swap = true
Expand Down
2 changes: 1 addition & 1 deletion internal/provider/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ func Provider(config *Config) plugin.ProviderFunc {
"scaleway_account_ssh_key": iam.DataSourceSSHKey(),
"scaleway_availability_zones": az.DataSourceAvailabilityZones(),
"scaleway_baremetal_offer": baremetal.DataSourceOffer(),
"scaleway_baremetal_easy_partitioning": baremetal.DataEasyPartitioning(),
"scaleway_baremetal_partition_schema": baremetal.DataPartitionSchema(),
"scaleway_baremetal_option": baremetal.DataSourceOption(),
"scaleway_baremetal_os": baremetal.DataSourceOS(),
"scaleway_baremetal_server": baremetal.DataSourceServer(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,11 @@ import (
"encoding/json"
"errors"
"fmt"
"strings"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
"github.com/scaleway/scaleway-sdk-go/scw"
"github.com/scaleway/terraform-provider-scaleway/v2/internal/dsf"
Expand All @@ -16,12 +18,25 @@ import (
)

const (
	// partitionSize is the fixed size, in bytes (~20GB), given to the root
	// partition when an extra data partition must fit on the same disk.
	partitionSize = 20000000000

	// defaultMountpoint is the default mount point for the extra partition.
	defaultMountpoint = "/data"

	// Software-RAID array device names.
	md0 = "/dev/md0"
	md1 = "/dev/md1"
	md2 = "/dev/md2"

	ext4       = "ext4"
	raidLevel1 = "raid_level_1"

	// NVMe partition device paths used to rebuild the RAID layout once the
	// swap partition has been removed (partition numbers shift down by one).
	nvme0p2 = "/dev/nvme0n1p2"
	nvme0p3 = "/dev/nvme0n1p3"
	nvme1p1 = "/dev/nvme1n1p1"
	nvme1p2 = "/dev/nvme1n1p2"

	// Partition labels.
	uefi = "uefi"
	swap = "swap"
	root = "root"
)

func DataEasyPartitioning() *schema.Resource {
func DataPartitionSchema() *schema.Resource {
return &schema.Resource{
ReadContext: dataEasyPartitioningRead,
ReadContext: dataPartitionSchemaRead,
Schema: map[string]*schema.Schema{
"offer_id": {
Type: schema.TypeString,
Expand All @@ -48,10 +63,11 @@ func DataEasyPartitioning() *schema.Resource {
Description: "set extra ext_4 partition",
},
"ext_4_mountpoint": {
Type: schema.TypeString,
Optional: true,
Default: "/data",
Description: "Mount point must be an absolute path with alphanumeric characters and underscores",
Type: schema.TypeString,
Optional: true,
Default: defaultMountpoint,
ValidateFunc: validation.StringInSlice([]string{"/data", "/home"}, false),
Description: "Mount point must be an absolute path",
},
"json_partition": {
Type: schema.TypeString,
Expand All @@ -62,7 +78,7 @@ func DataEasyPartitioning() *schema.Resource {
}
}

func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
func dataPartitionSchemaRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
api, fallBackZone, err := newAPIWithZone(d, m)
if err != nil {
return diag.FromErr(err)
Expand Down Expand Up @@ -105,30 +121,22 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
return diag.FromErr(err)
}

extraPart := d.Get("extra_partition").(bool)
swap := d.Get("swap").(bool)

if swap && !extraPart {
jsonSchema, err := json.Marshal(defaultPartitioningSchema)
if err != nil {
return diag.FromErr(err)
}

d.SetId(fmt.Sprintf("%s-%s", offerID, osID))
_ = d.Set("json_partition", string(jsonSchema))

return nil
hasSwap := d.Get("swap").(bool)
if !hasSwap {
removeSwap(defaultPartitioningSchema.Disks)
updateRaidRemoveSwap(defaultPartitioningSchema)
}

resizeRootPartition(defaultPartitioningSchema.Disks, swap, extraPart)
defaultPartitioningSchema.Disks = handleSwapPartitions(defaultPartitioningSchema.Disks, extraPart, swap)

mountpoint := d.Get("ext_4_mountpoint").(string)
addExtraExt4Partition(mountpoint, defaultPartitioningSchema, extraPart)
_, hasExtraPartition := d.GetOk("extra_partition")

if hasExtraPartition {
addExtraExt4Partition(mountpoint, defaultPartitioningSchema)
updateRaidNewPartition(defaultPartitioningSchema)
}

if !extraPart && !swap {
defaultPartitioningSchema.Filesystems = defaultPartitioningSchema.Filesystems[:len(defaultPartitioningSchema.Filesystems)-1]
defaultPartitioningSchema.Raids = defaultPartitioningSchema.Raids[:len(defaultPartitioningSchema.Raids)-1]
if !hasSwap || hasExtraPartition {
updateSizeRoot(defaultPartitioningSchema.Disks, hasExtraPartition)
}

err = api.ValidatePartitioningSchema(&baremetal.ValidatePartitioningSchemaRequest{
Expand All @@ -155,55 +163,49 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
return nil
}

func handleSwapPartitions(originalDisks []*baremetal.SchemaDisk, withExtraPartition bool, swap bool) []*baremetal.SchemaDisk {
if swap {
return originalDisks
// updateRaidRemoveSwap rewrites the schema's RAID layout for the case where
// the swap partition has been removed: /dev/md0 and /dev/md1 are rebuilt as
// RAID-1 mirrors over the NVMe partitions at their shifted positions.
// NOTE(review): the member device paths are fixed NVMe names — presumably the
// default schema always exposes two NVMe disks here; confirm against the API.
func updateRaidRemoveSwap(partitionSchema *baremetal.Schema) {
	partitionSchema.Raids = []*baremetal.SchemaRAID{
		{
			Name:    md0,
			Level:   raidLevel1,
			Devices: []string{nvme0p2, nvme1p1},
		},
		{
			Name:    md1,
			Level:   raidLevel1,
			Devices: []string{nvme0p3, nvme1p2},
		},
	}
}

result := make([]*baremetal.SchemaDisk, 0)

for _, disk := range originalDisks {
i := 1
newPartitions := []*baremetal.SchemaPartition{}

for _, p := range disk.Partitions {
if p.Label == "swap" {
continue
}

if p.Label == "root" {
if !withExtraPartition {
p.Size = 0
p.UseAllAvailableSpace = true
} else {
p.Size = partitionSize
}
}

p.Number = uint32(i)
i++

newPartitions = append(newPartitions, p)
}

result = append(result, &baremetal.SchemaDisk{
Device: disk.Device,
Partitions: newPartitions,
})
// updateRaidNewPartition appends a RAID-1 array (/dev/md2) mirroring the
// extra data partition that addExtraExt4Partition just appended as the last
// partition of each disk.
//
// Fix over the previous version: the member devices are derived from every
// disk in the schema instead of hard-coded indexes 0 and 1, so the function
// no longer panics when the offer exposes fewer than two disks and no longer
// silently ignores additional disks (the extra partition is added to all of
// them).
func updateRaidNewPartition(partitionSchema *baremetal.Schema) {
	devices := make([]string, 0, len(partitionSchema.Disks))

	for _, disk := range partitionSchema.Disks {
		// The extra partition is the last one on the disk, so its number
		// equals the partition count (numbering starts at 1).
		devices = append(devices, fmt.Sprintf("%sp%d", disk.Device, len(disk.Partitions)))
	}

	partitionSchema.Raids = append(partitionSchema.Raids, &baremetal.SchemaRAID{
		Name:    md2,
		Level:   raidLevel1,
		Devices: devices,
	})
}

func addExtraExt4Partition(mountpoint string, defaultPartitionSchema *baremetal.Schema, extraPart bool) {
if !extraPart {
return
}
func addExtraExt4Partition(mountpoint string, defaultPartitionSchema *baremetal.Schema) {
label := strings.TrimPrefix(mountpoint, "/")

for _, disk := range defaultPartitionSchema.Disks {
partIndex := uint32(len(disk.Partitions)) + 1
data := &baremetal.SchemaPartition{
Label: baremetal.SchemaPartitionLabel("data"),
Label: baremetal.SchemaPartitionLabel(label),
Number: partIndex,
Size: 0,
UseAllAvailableSpace: true,
Expand All @@ -212,26 +214,45 @@ func addExtraExt4Partition(mountpoint string, defaultPartitionSchema *baremetal.
}

filesystem := &baremetal.SchemaFilesystem{
Device: "/dev/md2",
Format: "ext4",
Device: md2,
Format: ext4,
Mountpoint: mountpoint,
}
defaultPartitionSchema.Filesystems = append(defaultPartitionSchema.Filesystems, filesystem)
}

func resizeRootPartition(originalDisks []*baremetal.SchemaDisk, withSwap bool, withExtraPartition bool) {
func updateSizeRoot(originalDisks []*baremetal.SchemaDisk, hasExtraPartition bool) {
for _, disk := range originalDisks {
for _, partition := range disk.Partitions {
if partition.Label == "root" {
if !withSwap && !withExtraPartition {
partition.Size = 0
partition.UseAllAvailableSpace = true
}
if partition.Label == root {
partition.Size = 0
partition.UseAllAvailableSpace = true

if withExtraPartition {
if hasExtraPartition {
partition.Size = partitionSize
partition.UseAllAvailableSpace = false
}
}
}
}
}

// removeSwap deletes the swap partition from every disk and renumbers the
// partitions that followed it so the numbering stays contiguous.
//
// Fix over the previous version: only partitions located AFTER the removed
// swap entry are shifted down. The old code decremented every non-uefi
// partition regardless of position, which produced duplicate partition
// numbers whenever a non-uefi partition preceded swap in the layout (and
// relied on uefi always being the first partition). For the default layout
// (uefi, swap, ...) the result is identical.
func removeSwap(originalDisks []*baremetal.SchemaDisk) {
	for _, disk := range originalDisks {
		kept := make([]*baremetal.SchemaPartition, 0, len(disk.Partitions))
		swapRemoved := false

		for _, partition := range disk.Partitions {
			if partition.Label == swap {
				swapRemoved = true
				continue
			}

			if swapRemoved {
				// Close the numbering gap left by the removed swap partition.
				partition.Number--
			}

			kept = append(kept, partition)
		}

		disk.Partitions = kept
	}
}
Loading
Loading