diff --git a/pkg/driver/helpers.go b/pkg/driver/helpers.go
index 1d00ade..b2f4e26 100644
--- a/pkg/driver/helpers.go
+++ b/pkg/driver/helpers.go
@@ -9,14 +9,15 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"slices"
 	"strconv"
 	"strings"
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/scaleway/scaleway-csi/pkg/scaleway"
 	block "github.com/scaleway/scaleway-sdk-go/api/block/v1"
+	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
 	"github.com/scaleway/scaleway-sdk-go/scw"
-	"golang.org/x/exp/slices"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -520,3 +521,29 @@ func uint64ToInt64(v uint64) int64 {
 
 	return int64(v)
 }
+
+// attachedScratchVolumes returns the number of attached scratch volumes, based
+// on the instance metadata.
+func attachedScratchVolumes(md *instance.Metadata) int {
+	var count int
+
+	for _, vol := range md.Volumes {
+		if vol.VolumeType == "scratch" {
+			count++
+		}
+	}
+
+	return count
+}
+
+// maxVolumesPerNode returns the maximum number of volumes that can be attached to a node,
+// after subtracting the system root volume and the provided number of reserved volumes.
+// It returns an error if the result is 0 or less.
+func maxVolumesPerNode(reservedCount int) (int64, error) {
+	max := scaleway.MaxVolumesPerNode - reservedCount - 1
+	if max <= 0 {
+		return 0, fmt.Errorf("max number of volumes that can be attached to this node must be at least 1, currently is %d", max)
+	}
+
+	return int64(max), nil
+}
diff --git a/pkg/driver/node.go b/pkg/driver/node.go
index 3256065..2e238d7 100644
--- a/pkg/driver/node.go
+++ b/pkg/driver/node.go
@@ -25,8 +25,9 @@ type nodeService struct {
 
 	diskUtils DiskUtils
 
-	nodeID   string
-	nodeZone scw.Zone
+	nodeID            string
+	nodeZone          scw.Zone
+	maxVolumesPerNode int64
 }
 
 func newNodeService() (*nodeService, error) {
@@ -40,10 +41,16 @@ func newNodeService() (*nodeService, error) {
 		return nil, fmt.Errorf("invalid zone in metadata: %w", err)
 	}
 
+	maxVolumesPerNode, err := maxVolumesPerNode(attachedScratchVolumes(metadata))
+	if err != nil {
+		return nil, err
+	}
+
 	return &nodeService{
-		diskUtils: newDiskUtils(),
-		nodeID:    metadata.ID,
-		nodeZone:  zone,
+		diskUtils:         newDiskUtils(),
+		nodeID:            metadata.ID,
+		nodeZone:          zone,
+		maxVolumesPerNode: maxVolumesPerNode,
 	}, nil
 }
 
@@ -431,7 +438,7 @@ func (d *nodeService) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetC
 func (d *nodeService) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
 	return &csi.NodeGetInfoResponse{
 		NodeId:            d.nodeZone.String() + "/" + d.nodeID,
-		MaxVolumesPerNode: scaleway.MaxVolumesPerNode - 1, // One is already used by the l_ssd or b_ssd root volume
+		MaxVolumesPerNode: d.maxVolumesPerNode,
 		AccessibleTopology: &csi.Topology{
 			Segments: map[string]string{
 				ZoneTopologyKey: d.nodeZone.String(),
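
Editor's note, not part of the patch: a minimal test sketch for the new capacity helper, assuming the package layout shown above (maxVolumesPerNode lives in package driver, and the scaleway package exports the MaxVolumesPerNode constant as used in the hunks); the test file and its cases are hypothetical, not code from the PR.

package driver

import (
	"testing"

	"github.com/scaleway/scaleway-csi/pkg/scaleway"
)

// TestMaxVolumesPerNode is a hypothetical sketch: one attachment slot is always
// reserved for the root volume, one more per reserved (e.g. scratch) volume,
// and the helper must refuse to report a capacity of zero or less.
func TestMaxVolumesPerNode(t *testing.T) {
	// Nothing reserved: only the root volume is subtracted.
	got, err := maxVolumesPerNode(0)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if want := int64(scaleway.MaxVolumesPerNode - 1); got != want {
		t.Errorf("maxVolumesPerNode(0) = %d, want %d", got, want)
	}

	// Reserving every remaining slot must surface an error, not a capacity of 0.
	if _, err := maxVolumesPerNode(scaleway.MaxVolumesPerNode - 1); err == nil {
		t.Error("expected an error when no attachment slots remain")
	}
}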