10 changes: 10 additions & 0 deletions proxmox/nodes/containers/containers.go
@@ -370,3 +370,13 @@ func (c *Client) WaitForContainerConfigUnlock(ctx context.Context, ignoreErrorRe

return nil
}

// ResizeContainerDisk resizes a container disk.
func (c *Client) ResizeContainerDisk(ctx context.Context, d *ResizeRequestBody) error {
err := c.DoRequest(ctx, http.MethodPut, c.ExpandPath("resize"), d, nil)
if err != nil {
return fmt.Errorf("error resizing container disk: %w", err)
}

return nil
}
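A minimal usage sketch, assuming a *containers.Client already scoped to the target node and container (client construction is provider-internal and omitted); growRootFS and the size values are illustrative only. Per the Proxmox resize semantics, the size is either an absolute value such as "16G" or a relative increase such as "+2G", and shrinking is not supported:

func growRootFS(ctx context.Context, containerAPI *containers.Client, size string) error {
	// "rootfs" targets the root filesystem; mount points would be addressed as "mp0", "mp1", ...
	err := containerAPI.ResizeContainerDisk(ctx, &containers.ResizeRequestBody{
		Disk: "rootfs",
		Size: size, // e.g. "16G" or "+2G"
	})
	if err != nil {
		return fmt.Errorf("error resizing container disk: %w", err)
	}

	return nil
}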
5 changes: 5 additions & 0 deletions proxmox/nodes/containers/containers_types.go
@@ -275,6 +275,11 @@ type ShutdownRequestBody struct {
Timeout *int `json:"timeout,omitempty" url:"timeout,omitempty"`
}

// ResizeRequestBody contains the data for a container disk resize request.
type ResizeRequestBody struct {
Disk string `json:"disk" url:"disk"`
Size string `json:"size" url:"size"`
}

// UpdateRequestBody contains the data for a container update request.
type UpdateRequestBody CreateRequestBody

112 changes: 96 additions & 16 deletions proxmoxtf/resource/container/container.go
@@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@@ -333,7 +334,6 @@ func Container() *schema.Resource {
Type: schema.TypeList,
Description: "The disks",
Optional: true,
ForceNew: true,
DefaultFunc: func() (interface{}, error) {
return []interface{}{
map[string]interface{}{
@@ -374,7 +374,6 @@ func Container() *schema.Resource {
Type: schema.TypeInt,
Description: "The rootfs size in gigabytes",
Optional: true,
ForceNew: true,
Default: dvDiskSize,
ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(0)),
},
@@ -1025,6 +1024,55 @@ func Container() *schema.Resource {
return strconv.Itoa(newValue.(int)) != d.Id()
},
),
customdiff.ForceNewIf(
mkDisk,
func(_ context.Context, d *schema.ResourceDiff, _ interface{}) bool {
oldRaw, newRaw := d.GetChange(mkDisk)
oldList, _ := oldRaw.([]interface{})
newList, _ := newRaw.([]interface{})

if oldList == nil {
oldList = []interface{}{}
}
if newList == nil {
newList = []interface{}{}
}

// fmt.Printf("ALEX: ALL DISK: old: %v ---- new: %v\n", old, new)

minDrives := min(len(oldList), len(newList))

for i := range minDrives {
oldSize := dvDiskSize
newSize := dvDiskSize
if i < len(oldList) && oldList[i] != nil {
if om, ok := oldList[i].(map[string]interface{}); ok {
if v, ok := om[mkDiskSize].(int); ok {
oldSize = v
}
}
}

if i < len(newList) && newList[i] != nil {
if nm, ok := newList[i].(map[string]interface{}); ok {
if v, ok := nm[mkDiskSize].(int); ok {
newSize = v
}
}
}

// fmt.Printf("ALEX: check DISK %v: %v vs %v\n", i, oldSize, newSize)
if oldSize > newSize {

// fmt.Print("ALEX: check DISK: new is smaller\n")
_ = d.ForceNew(fmt.Sprintf("%s.%d.%s", mkDisk, i, mkDiskSize))
return true // <-- this is not working
}
}

return false
},
),
),
Importer: &schema.ResourceImporter{
StateContext: func(_ context.Context, d *schema.ResourceData, _ interface{}) ([]*schema.ResourceData, error) {
@@ -1765,7 +1813,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
}

diskSize := diskBlock[mkDiskSize].(int)
if diskDatastoreID != "" && (diskSize != dvDiskSize || len(mountPoints) > 0) {
if diskDatastoreID != "" && (diskSize != dvDiskSize || len(mountPoints) > 0 || len(diskMountOptions) > 0) {
// This is a special case where the rootfs size is set to a non-default value at creation time.
// see https://pve.proxmox.com/pve-docs/chapter-pct.html#_storage_backed_mount_points
rootFS = &containers.CustomRootFS{
@@ -2985,17 +3033,42 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
}

rootFS := &containers.CustomRootFS{}
// Disk ID for the rootfs is always 0
diskID := 0
vmID := d.Get(mkVMID).(int)
rootFS.Volume = diskBlock[mkDiskDatastoreID].(string)
rootFS.Volume = getContainerDiskVolume(rootFS.Volume, vmID, diskID)
containerConfig, e := containerAPI.GetContainer(ctx)
if e != nil {
if errors.Is(e, api.ErrResourceDoesNotExist) {
d.SetId("")
return nil
}
return diag.FromErr(e)
}

if containerConfig.RootFS == nil {
return diag.Errorf("RootFS information of container malformed.")
}
rootFS.Volume = containerConfig.RootFS.Volume

acl := types.CustomBool(diskBlock[mkDiskACL].(bool))
mountOptions := diskBlock[mkDiskMountOptions].([]interface{})
quota := types.CustomBool(diskBlock[mkDiskQuota].(bool))
replicate := types.CustomBool(diskBlock[mkDiskReplicate].(bool))

oldSize := containerConfig.RootFS.Size
size := types.DiskSizeFromGigabytes(int64(diskBlock[mkDiskSize].(int)))
if *oldSize > *size {
Contributor (critical):

There is a potential nil pointer dereference here. oldSize is of type *types.DiskSize and can be nil if the size property is not returned by the Proxmox API for the container's rootfs. Dereferencing it with *oldSize on this line will cause a panic. You should add a nil check for oldSize before dereferencing it.

Suggested change
if *oldSize > *size {
if oldSize != nil && *oldSize > *size {
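
If this guard ends up being needed in more than one place, a nil-safe helper along the following lines could be factored out; rootFSShrunk is hypothetical and assumes types.DiskSize remains an ordered numeric type, as the comparison above implies:

func rootFSShrunk(current, requested *types.DiskSize) bool {
	// Treat a missing size on either side as "unknown" rather than as a shrink.
	if current == nil || requested == nil {
		return false
	}

	return *requested < *current
}

At the call site this would read: if rootFSShrunk(oldSize, size) { ... }, avoiding any inline pointer dereference.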

// TODO: we should never reach this point. The `plan` should recreate the container, not update it.
d.SetId("")
return diag.Errorf("New disk size (%s) has to be greater the current disk (%s)!", oldSize, size)
}

// Compare sizes by value, not by pointer, so the resize is skipped when nothing changed.
if oldSize == nil || *oldSize != *size {
err = containerAPI.ResizeContainerDisk(ctx, &containers.ResizeRequestBody{
Disk: "rootfs",
Size: size.String(),
})
if err != nil {
return diag.FromErr(err)
}
}

rootFS.ACL = &acl
rootFS.Quota = &quota
@@ -3005,15 +3078,26 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
mountOptionsStrings := make([]string, 0, len(mountOptions))

for _, option := range mountOptions {
mountOptionsStrings = append(mountOptionsStrings, option.(string))
optionString := option.(string)
mountOptionsStrings = append(mountOptionsStrings, optionString)
}

// Always set, including empty, to allow clearing mount options
rootFS.MountOptions = &mountOptionsStrings

updateBody.RootFS = rootFS
// To compare contents regardless of order, we can sort them.
// The schema already uses a suppress func for order, so we should be consistent.
sort.Strings(mountOptionsStrings)
currentMountOptions := containerConfig.RootFS.MountOptions
currentMountOptionsSorted := []string{}
if currentMountOptions != nil {
currentMountOptionsSorted = append(currentMountOptionsSorted, *currentMountOptions...)
}
sort.Strings(currentMountOptionsSorted)
if !slices.Equal(mountOptionsStrings, currentMountOptionsSorted) {
rebootRequired = true
}

rebootRequired = true
updateBody.RootFS = rootFS
}

if d.HasChange(mkFeatures) {
@@ -3534,10 +3618,6 @@ func parseImportIDWithNodeName(id string) (string, string, error) {
return nodeName, id, nil
}

func getContainerDiskVolume(rawVolume string, vmID int, diskID int) string {
return fmt.Sprintf("%s:vm-%d-disk-%d", rawVolume, vmID, diskID)
}

func skipDnsDiffIfEmpty(k, oldValue, newValue string, d *schema.ResourceData) bool {
dnsDataKey := mkInitialization + ".0." + mkInitializationDNS
if k == dnsDataKey+".#" {