diff --git a/minikube/generator/schema_builder.go b/minikube/generator/schema_builder.go
index 366d799..8d60741 100644
--- a/minikube/generator/schema_builder.go
+++ b/minikube/generator/schema_builder.go
@@ -58,10 +58,10 @@ var updateFields = []string{
 var schemaOverrides map[string]SchemaOverride = map[string]SchemaOverride{
 	"memory": {
 		Default:          "4g",
-		Description:      "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
+		Description:      "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \\\"max\\\" to use the maximum amount of memory. Use \\\"no-limit\\\" to not specify a limit (Docker/Podman only))",
 		Type:             String,
-		StateFunc:        "state_utils.ResourceSizeConverter()",
-		ValidateDiagFunc: "state_utils.ResourceSizeValidator()",
+		StateFunc:        "state_utils.MemoryConverter()",
+		ValidateDiagFunc: "state_utils.MemoryValidator()",
 	},
 	"disk_size": {
 		Default:          "20000mb",
@@ -71,9 +71,11 @@ var schemaOverrides map[string]SchemaOverride = map[string]SchemaOverride{
 		ValidateDiagFunc: "state_utils.ResourceSizeValidator()",
 	},
 	"cpus": {
-		Default:     "2",
-		Description: "Amount of CPUs to allocate to Kubernetes",
-		Type:        Int,
+		Default:          "2",
+		Description:      "Number of CPUs allocated to Kubernetes. Use \\\"max\\\" to use the maximum number of CPUs. Use \\\"no-limit\\\" to not specify a limit (Docker/Podman only)",
+		Type:             String,
+		StateFunc:        "state_utils.CPUConverter()",
+		ValidateDiagFunc: "state_utils.CPUValidator()",
 	},
 	// Customize the description to be the fullset of drivers
 	"driver": {
diff --git a/minikube/generator/schema_builder_test.go b/minikube/generator/schema_builder_test.go
index dda5ebe..889d7ec 100644
--- a/minikube/generator/schema_builder_test.go
+++ b/minikube/generator/schema_builder_test.go
@@ -360,14 +360,14 @@ func TestOverride(t *testing.T) {
 	assert.Equal(t, header+`	"memory": {
 		Type:             schema.TypeString,
-		Description:      "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
+		Description:      "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \"max\" to use the maximum amount of memory. Use \"no-limit\" to not specify a limit (Docker/Podman only))",
Use \"no-limit\" to not specify a limit (Docker/Podman only))", Optional: true, ForceNew: true, Default: "4g", - StateFunc: state_utils.ResourceSizeConverter(), - ValidateDiagFunc: state_utils.ResourceSizeValidator(), + StateFunc: state_utils.MemoryConverter(), + ValidateDiagFunc: state_utils.MemoryValidator(), }, } diff --git a/minikube/lib/memory.go b/minikube/lib/memory.go new file mode 100644 index 0000000..d8bc17d --- /dev/null +++ b/minikube/lib/memory.go @@ -0,0 +1,32 @@ +package lib + +import ( + "k8s.io/minikube/pkg/minikube/machine" +) + + +var NoLimit = "no-limit" +var Max = "max" + +// MemoryInfo holds system and container memory information +type MemoryInfo struct { + SystemMemory int +} + +// GetMemoryLimits returns the amount of memory allocated to the system and container +// The return values are in MiB +func GetMemoryLimit() (*MemoryInfo, error) { + info, _, memErr, _ := machine.LocalHostInfo() + + if memErr != nil { + return nil, memErr + } + + // Subtract 1gb for overhead + memInfo := &MemoryInfo{ + SystemMemory: int(info.Memory) - 1024, + } + + return memInfo, nil +} + diff --git a/minikube/resource_cluster.go b/minikube/resource_cluster.go index 13ad323..8f9396c 100644 --- a/minikube/resource_cluster.go +++ b/minikube/resource_cluster.go @@ -271,7 +271,13 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste } memoryStr := d.Get("memory").(string) - memoryMb, err := pkgutil.CalculateSizeInMB(memoryStr) + memoryMb, err := state_utils.GetMemory(memoryStr) + if err != nil { + return nil, err + } + + cpuStr := d.Get("cpus").(string) + cpus, err := state_utils.GetCPUs(cpuStr) if err != nil { return nil, err } @@ -380,7 +386,7 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste KicBaseImage: d.Get("base_image").(string), Network: d.Get("network").(string), Memory: memoryMb, - CPUs: d.Get("cpus").(int), + CPUs: cpus, DiskSize: diskMb, Driver: driver, ListenAddress: d.Get("listen_address").(string), diff --git a/minikube/resource_cluster_test.go b/minikube/resource_cluster_test.go index 505c356..35fe888 100644 --- a/minikube/resource_cluster_test.go +++ b/minikube/resource_cluster_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib" + "github.com/scott-the-programmer/terraform-provider-minikube/minikube/state_utils" "github.com/golang/mock/gomock" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -35,12 +36,14 @@ type mockClusterClientProperties struct { haNodes int workerNodes int diskSize int + memory string + cpu string } func TestClusterCreation(t *testing.T) { resource.Test(t, resource.TestCase{ IsUnitTest: true, - Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreation", 1, 0, 20000}))}, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreation", 1, 0, 20000, "4096mb", "1"}))}, Steps: []resource.TestStep{ { Config: testUnitClusterConfig("some_driver", "TestClusterCreation"), @@ -55,7 +58,7 @@ func TestClusterCreation(t *testing.T) { func TestClusterUpdate(t *testing.T) { resource.Test(t, resource.TestCase{ IsUnitTest: true, - Providers: map[string]*schema.Provider{"minikube": NewProvider(mockUpdate(mockClusterClientProperties{t, "TestClusterUpdate", 1, 0, 20000}))}, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockUpdate(mockClusterClientProperties{t, "TestClusterUpdate", 1, 0, 
20000, "4096mb", "1"}))}, Steps: []resource.TestStep{ { Config: testUnitClusterConfig("some_driver", "TestClusterUpdate"), @@ -73,7 +76,7 @@ func TestClusterUpdate(t *testing.T) { func TestClusterHA(t *testing.T) { resource.Test(t, resource.TestCase{ IsUnitTest: true, - Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationHA", 3, 5, 20000}))}, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationHA", 3, 5, 20000, "4096mb", "1"}))}, Steps: []resource.TestStep{ { Config: testUnitClusterHAConfig("some_driver", "TestClusterCreationHA"), @@ -85,7 +88,7 @@ func TestClusterHA(t *testing.T) { func TestClusterDisk(t *testing.T) { resource.Test(t, resource.TestCase{ IsUnitTest: true, - Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationDisk", 1, 0, 20480}))}, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationDisk", 1, 0, 20480, "4096mb", "1"}))}, Steps: []resource.TestStep{ { Config: testUnitClusterDiskConfig("some_driver", "TestClusterCreationDisk"), @@ -97,7 +100,7 @@ func TestClusterDisk(t *testing.T) { func TestClusterWait(t *testing.T) { resource.Test(t, resource.TestCase{ IsUnitTest: true, - Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0, 20000}))}, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0, 20000, "4096mb", "1"}))}, Steps: []resource.TestStep{ { Config: testUnitClusterWaitConfig("some_driver", "TestClusterCreationWait"), @@ -326,10 +329,58 @@ func TestClusterCreation_HyperV(t *testing.T) { }) } +func TestClusterNoLimitMemory(t *testing.T) { + resource.Test(t, resource.TestCase{ + IsUnitTest: true, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterNoLimitMemory", 1, 0, 20000, "no-limit", "1"}))}, + Steps: []resource.TestStep{ + { + Config: testUnitClusterNoLimitMemoryConfig("some_driver", "TestClusterNoLimitMemory"), + }, + }, + }) +} + +func TestClusterMaxMemory(t *testing.T) { + resource.Test(t, resource.TestCase{ + IsUnitTest: true, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterMaxMemory", 1, 0, 20000, "max", "1"}))}, + Steps: []resource.TestStep{ + { + Config: testUnitClusterMaxMemoryConfig("some_driver", "TestClusterMaxMemory"), + }, + }, + }) +} + +func TestClusterNoLimitCPU(t *testing.T) { + resource.Test(t, resource.TestCase{ + IsUnitTest: true, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterNoLimitCPU", 1, 0, 20000, "4096mb", "no-limit"}))}, + Steps: []resource.TestStep{ + { + Config: testUnitClusterNoLimitCPUConfig("some_driver", "TestClusterNoLimitCPU"), + }, + }, + }) +} + +func TestClusterMaxCPU(t *testing.T) { + resource.Test(t, resource.TestCase{ + IsUnitTest: true, + Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterMaxCPU", 1, 0, 20000, "4096mb", "max"}))}, + Steps: []resource.TestStep{ + { + Config: testUnitClusterMaxCPUConfig("some_driver", "TestClusterMaxCPU"), + }, + }, + }) +} + func mockUpdate(props 
 	ctrl := gomock.NewController(props.t)
-	mockClusterClient := getBaseMockClient(ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize)
+	mockClusterClient := getBaseMockClient(props.t, ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize, props.memory, props.cpu)
 
 	gomock.InOrder(
 		mockClusterClient.EXPECT().
@@ -366,7 +417,7 @@ func mockUpdate(props mockClusterClientProperties) schema.ConfigureContextFunc {
 func mockSuccess(props mockClusterClientProperties) schema.ConfigureContextFunc {
 	ctrl := gomock.NewController(props.t)
-	mockClusterClient := getBaseMockClient(ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize)
+	mockClusterClient := getBaseMockClient(props.t, ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize, props.memory, props.cpu)
 
 	mockClusterClient.EXPECT().
 		GetAddons().
@@ -384,7 +435,7 @@ func mockSuccess(props mockClusterClientProperties) schema.ConfigureContextFunc
 	return configureContext
 }
 
-func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int, workerNodes int, diskSize int) *lib.MockClusterClient {
+func getBaseMockClient(t *testing.T, ctrl *gomock.Controller, clusterName string, haNodes int, workerNodes int, diskSize int, memory string, cpu string) *lib.MockClusterClient {
 	mockClusterClient := lib.NewMockClusterClient(ctrl)
 
 	os.Mkdir("test_output", 0755)
@@ -424,6 +475,16 @@ func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int,
 		Worker:            true,
 	}
 
+	mem, err := state_utils.GetMemory(memory)
+	if err != nil {
+		t.Fatalf("Failed to get memory: %v", err)
+	}
+
+	c, err := state_utils.GetCPUs(cpu)
+	if err != nil {
+		t.Fatalf("Failed to get cpu: %v", err)
+	}
+
 	cc := config.ClusterConfig{
 		Name:                    "terraform-provider-minikube-acc",
 		APIServerPort:           clusterSchema["apiserver_port"].Default.(int),
@@ -432,8 +493,8 @@ func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int,
 		MinikubeISO:             defaultIso,
 		KicBaseImage:            clusterSchema["base_image"].Default.(string),
 		Network:                 clusterSchema["network"].Default.(string),
-		Memory:                  4096,
-		CPUs:                    2,
+		Memory:                  mem,
+		CPUs:                    c,
 		DiskSize:                diskSize,
 		Driver:                  "some_driver",
 		ListenAddress:           clusterSchema["listen_address"].Default.(string),
@@ -827,3 +888,43 @@ func testPropertyExists(n string, id string) resource.TestCheckFunc {
 		return nil
 	}
 }
+
+func testUnitClusterNoLimitMemoryConfig(driver string, clusterName string) string {
+	return fmt.Sprintf(`
+	resource "minikube_cluster" "new" {
+		driver = "%s"
+		cluster_name = "%s"
+		memory = "no-limit"
+	}
+	`, driver, clusterName)
+}
+
+func testUnitClusterMaxMemoryConfig(driver string, clusterName string) string {
+	return fmt.Sprintf(`
+	resource "minikube_cluster" "new" {
+		driver = "%s"
+		cluster_name = "%s"
+		memory = "max"
+	}
+	`, driver, clusterName)
+}
+
+func testUnitClusterNoLimitCPUConfig(driver string, clusterName string) string {
+	return fmt.Sprintf(`
+	resource "minikube_cluster" "new" {
+		driver = "%s"
+		cluster_name = "%s"
+		cpus = "no-limit"
+	}
+	`, driver, clusterName)
+}
+
+func testUnitClusterMaxCPUConfig(driver string, clusterName string) string {
+	return fmt.Sprintf(`
+	resource "minikube_cluster" "new" {
+		driver = "%s"
+		cluster_name = "%s"
+		cpus = "max"
+	}
+	`, driver, clusterName)
+}
diff --git a/minikube/schema_cluster.go b/minikube/schema_cluster.go
index aefda1c..031c6c8 100644
--- a/minikube/schema_cluster.go
+++ b/minikube/schema_cluster.go
@@ -137,7 +137,7 @@ var (
 			Optional:    true,
 			ForceNew:    true,
-			Default:     "gcr.io/k8s-minikube/kicbase:v0.0.45@sha256:81df288595202a317b1a4dc2506ca2e4ed5f22373c19a441b88cfbf4b9867c85",
+			Default:     "gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279",
 		},
 
 		"binary_mirror": {
@@ -191,13 +191,15 @@ var (
 		},
 
 		"cpus": {
-			Type:        schema.TypeInt,
-			Description: "Amount of CPUs to allocate to Kubernetes",
+			Type:        schema.TypeString,
+			Description: "Number of CPUs allocated to Kubernetes. Use \"max\" to use the maximum number of CPUs. Use \"no-limit\" to not specify a limit (Docker/Podman only)",
 
 			Optional: true,
 			ForceNew: true,
-			Default:  2,
+			Default:          "2",
+			StateFunc:        state_utils.CPUConverter(),
+			ValidateDiagFunc: state_utils.CPUValidator(),
 		},
 
 		"cri_socket": {
@@ -413,7 +415,7 @@ var (
 		"gpus": {
 			Type:        schema.TypeString,
-			Description: "Allow pods to use your NVIDIA GPUs. Options include: [all,nvidia] (Docker driver with Docker container-runtime only)",
+			Description: "Allow pods to use your GPUs. Options include: [all,nvidia,amd] (Docker driver with Docker container-runtime only)",
 
 			Optional: true,
 			ForceNew: true,
@@ -598,7 +600,7 @@ var (
 		"kubernetes_version": {
 			Type:        schema.TypeString,
-			Description: "The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.31.0, 'latest' for v1.31.0). Defaults to 'stable'.",
+			Description: "The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.32.0, 'latest' for v1.32.0). Defaults to 'stable'.",
 
 			Optional: true,
 			ForceNew: true,
@@ -668,14 +670,14 @@ var (
 		"memory": {
 			Type:        schema.TypeString,
-			Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
+			Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \"max\" to use the maximum amount of memory. Use \"no-limit\" to not specify a limit (Docker/Podman only))",
Use \"no-limit\" to not specify a limit (Docker/Podman only))", Optional: true, ForceNew: true, Default: "4g", - StateFunc: state_utils.ResourceSizeConverter(), - ValidateDiagFunc: state_utils.ResourceSizeValidator(), + StateFunc: state_utils.MemoryConverter(), + ValidateDiagFunc: state_utils.MemoryValidator(), }, "mount": { diff --git a/minikube/state_utils/cpu.go b/minikube/state_utils/cpu.go new file mode 100644 index 0000000..e71b4f8 --- /dev/null +++ b/minikube/state_utils/cpu.go @@ -0,0 +1,85 @@ +package state_utils + +import ( + "errors" + "fmt" + "runtime" + "strconv" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib" +) + +func GetCPUs(cpuStr string) (int, error) { + if cpuStr == lib.Max { + return runtime.NumCPU(), nil + } else if cpuStr == lib.NoLimit { + return 0, nil + } + cpus, err := strconv.Atoi(cpuStr) + if err != nil { + return 0, err + } + if cpus < 0 { + return 0, fmt.Errorf("CPU count cannot be negative: %d", cpus) + } + return cpus, nil +} + +func CPUConverter() schema.SchemaStateFunc { + return func(val interface{}) string { + result, err := CPUConverterImpl(val) + if err != nil { + panic(err) + } + return result + } +} + +func CPUConverterImpl(val interface{}) (string, error) { + cpuStr, ok := val.(string) + if !ok { + return "", errors.New("cpu value is not a string") + } + + cpus, err := GetCPUs(cpuStr) + if err != nil { + return "", err + } + + return strconv.Itoa(cpus), nil +} + +func CPUValidator() schema.SchemaValidateDiagFunc { + return schema.SchemaValidateDiagFunc(func(val interface{}, path cty.Path) diag.Diagnostics { + err := CPUValidatorImpl(val) + if err != nil { + return diag.FromErr(err) + } + return nil + }) +} + +func CPUValidatorImpl(val interface{}) error { + cpuStr, ok := val.(string) + if !ok { + return errors.New("cpu value is not a string") + } + + if cpuStr == lib.Max || cpuStr == lib.NoLimit { + return nil + } + + cpus, err := strconv.Atoi(cpuStr) + if err != nil { + return fmt.Errorf("invalid CPU value: %v", err) + } + + if cpus <= 0 { + return fmt.Errorf("CPU count must be positive: %d", cpus) + } + + return nil +} diff --git a/minikube/state_utils/cpu_test.go b/minikube/state_utils/cpu_test.go new file mode 100644 index 0000000..1a950ca --- /dev/null +++ b/minikube/state_utils/cpu_test.go @@ -0,0 +1,183 @@ +package state_utils + +import ( + "runtime" + "strconv" + "testing" + + "github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib" + "github.com/stretchr/testify/assert" +) + +func TestGetCPUs(t *testing.T) { + tests := []struct { + name string + input string + expected int + expectError bool + }{ + { + name: "valid CPU count", + input: "2", + expected: 2, + expectError: false, + }, + { + name: "max CPUs", + input: lib.Max, + expected: runtime.NumCPU(), + expectError: false, + }, + { + name: "no limit case", + input: lib.NoLimit, + expected: 0, + expectError: false, + }, + { + name: "invalid CPU count", + input: "invalid", + expected: 0, + expectError: true, + }, + { + name: "negative CPU count", + input: "-1", + expected: 0, + expectError: true, + }, + { + name: "empty string", + input: "", + expected: 0, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := GetCPUs(tt.input) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, 
+			}
+		})
+	}
+}
+
+func TestCPUConverterImpl(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       interface{}
+		expected    string
+		expectError bool
+	}{
+		{
+			name:        "valid CPU count",
+			input:       "2",
+			expected:    "2",
+			expectError: false,
+		},
+		{
+			name:        "max CPUs",
+			input:       lib.Max,
+			expected:    strconv.Itoa(runtime.NumCPU()),
+			expectError: false,
+		},
+		{
+			name:        "no limit case",
+			input:       lib.NoLimit,
+			expected:    "0",
+			expectError: false,
+		},
+		{
+			name:        "non-string input",
+			input:       42,
+			expected:    "",
+			expectError: true,
+		},
+		{
+			name:        "invalid CPU string",
+			input:       "invalid",
+			expected:    "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := CPUConverterImpl(tt.input)
+
+			if tt.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestCPUValidatorImpl(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       interface{}
+		expectError bool
+	}{
+		{
+			name:        "valid CPU count",
+			input:       "2",
+			expectError: false,
+		},
+		{
+			name:        "max CPUs",
+			input:       lib.Max,
+			expectError: false,
+		},
+		{
+			name:        "no limit case",
+			input:       lib.NoLimit,
+			expectError: false,
+		},
+		{
+			name:        "non-string input",
+			input:       42,
+			expectError: true,
+		},
+		{
+			name:        "invalid CPU string",
+			input:       "invalid",
+			expectError: true,
+		},
+		{
+			name:        "zero CPU count",
+			input:       "0",
+			expectError: true,
+		},
+		{
+			name:        "negative CPU count",
+			input:       "-1",
+			expectError: true,
+		},
+		{
+			name:        "empty string",
+			input:       "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := CPUValidatorImpl(tt.input)
+
+			if tt.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/minikube/state_utils/memory.go b/minikube/state_utils/memory.go
new file mode 100644
index 0000000..832eb21
--- /dev/null
+++ b/minikube/state_utils/memory.go
@@ -0,0 +1,93 @@
+package state_utils
+
+import (
+	"errors"
+	"strconv"
+
+	"github.com/hashicorp/go-cty/cty"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib"
+	pkgutil "k8s.io/minikube/pkg/util"
+)
+
+func GetMemory(memoryStr string) (int, error) {
+	var memoryMb int
+	var err error
+	if memoryStr == lib.Max {
+		memoryInfo, err := lib.GetMemoryLimit()
+		if err != nil {
+			return 0, err
+		}
+
+		memoryMb = memoryInfo.SystemMemory
+	} else if memoryStr == lib.NoLimit {
+		memoryMb = 0
+	} else {
+		err = ResourceSizeValidatorImpl(memoryStr)
+		if err != nil {
+			return 0, err
+		}
+
+		memoryStr, err = ResourceSizeConverterImpl(memoryStr)
+		if err != nil {
+			return 0, err
+		}
+
+		memoryMb, err = pkgutil.CalculateSizeInMB(memoryStr)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	return memoryMb, err
+}
+
+func MemoryConverter() schema.SchemaStateFunc {
+	return func(val interface{}) string {
+		result, err := MemoryConverterImpl(val)
+		if err != nil {
+			panic(err)
+		}
+		return result
+	}
+}
+
+func MemoryConverterImpl(val interface{}) (string, error) {
+	memoryStr, ok := val.(string)
+	if !ok {
+		return "", errors.New("memory value is not a string")
+	}
+
+	mem, err := GetMemory(memoryStr)
+	if err != nil {
+		return "", err
+	}
+
+	memoryStr = strconv.Itoa(mem) + "mb"
+
+	return memoryStr, err
+}
+
+func MemoryValidator() schema.SchemaValidateDiagFunc {
+	return schema.SchemaValidateDiagFunc(func(val interface{}, path cty.Path) diag.Diagnostics {
+		err := MemoryValidatorImpl(val)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+		return nil
+	})
+}
+
+func MemoryValidatorImpl(val interface{}) error {
+	memoryStr, ok := val.(string)
+	if !ok {
+		return errors.New("memory value is not a string")
+	}
+
+	if memoryStr == lib.Max || memoryStr == lib.NoLimit {
+		return nil
+	}
+
+	return ResourceSizeValidatorImpl(memoryStr)
+}
diff --git a/minikube/state_utils/memory_test.go b/minikube/state_utils/memory_test.go
new file mode 100644
index 0000000..fe65d06
--- /dev/null
+++ b/minikube/state_utils/memory_test.go
@@ -0,0 +1,206 @@
+package state_utils
+
+import (
+	"testing"
+
+	"github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetMemory(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       string
+		expected    int
+		expectError bool
+	}{
+		{
+			name:        "valid memory size - 2G",
+			input:       "2G",
+			expected:    2048,
+			expectError: false,
+		},
+		{
+			name:        "valid memory size - 1024mb",
+			input:       "1024mb",
+			expected:    1024,
+			expectError: false,
+		},
+		{
+			name:        "no limit case",
+			input:       lib.NoLimit,
+			expected:    0,
+			expectError: false,
+		},
+		{
+			name:        "invalid memory size",
+			input:       "invalid",
+			expected:    0,
+			expectError: true,
+		},
+		{
+			name:        "negative memory size",
+			input:       "-1G",
+			expected:    0,
+			expectError: true,
+		},
+		{
+			name:        "empty string",
+			input:       "",
+			expected:    0,
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := GetMemory(tt.input)
+
+			if tt.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestMemoryConverterImpl(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       interface{}
+		expected    string
+		expectError bool
+	}{
+		{
+			name:        "valid memory size - 2G",
+			input:       "2G",
+			expected:    "2048mb",
+			expectError: false,
+		},
+		{
+			name:        "valid memory size - 1024mb",
+			input:       "1024mb",
+			expected:    "1024mb",
+			expectError: false,
+		},
+		{
+			name:        "non-string input",
+			input:       123,
+			expected:    "",
+			expectError: true,
+		},
+		{
+			name:        "invalid memory size",
+			input:       "invalid",
+			expected:    "",
+			expectError: true,
+		},
+		{
+			name:        "empty string",
+			input:       "",
+			expected:    "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := MemoryConverterImpl(tt.input)
+
+			if tt.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestMemoryValidatorImpl(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       interface{}
+		expectError bool
+	}{
+		{
+			name:        "valid memory size - 2G",
+			input:       "2G",
+			expectError: false,
+		},
+		{
+			name:        "valid memory size - 1024mb",
+			input:       "1024mb",
+			expectError: false,
+		},
+		{
+			name:        "max case",
+			input:       lib.Max,
+			expectError: false,
+		},
+		{
+			name:        "no limit case",
+			input:       lib.NoLimit,
+			expectError: false,
+		},
+		{
+			name:        "non-string input",
+			input:       123,
+			expectError: true,
+		},
+		{
+			name:        "invalid memory size",
+			input:       "invalid",
+			expectError: true,
+		},
+		{
+			name:        "negative memory size",
+			input:       "-1G",
+			expectError: true,
+		},
+		{
+			name:        "empty string",
+			input:       "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := MemoryValidatorImpl(tt.input)
+
+			if tt.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestMemoryConverter(t *testing.T) {
+	converter := MemoryConverter()
+
+	// Test normal case
+	result := converter("2G")
+	assert.Equal(t, "2048mb", result)
+
+	// Test panic case
+	assert.Panics(t, func() {
+		converter(123) // non-string input should panic
+	})
+}
+
+func TestMemoryValidator(t *testing.T) {
+	validator := MemoryValidator()
+
+	// Test valid cases
+	assert.Nil(t, validator("2G", nil))
+
+	// Test invalid cases
+	assert.NotNil(t, validator(123, nil))       // non-string input
+	assert.NotNil(t, validator("invalid", nil)) // invalid memory size
+	assert.NotNil(t, validator("", nil))        // empty string
+}
diff --git a/minikube/state_utils/resource_size.go b/minikube/state_utils/resource_size.go
index e211c06..a31e29d 100644
--- a/minikube/state_utils/resource_size.go
+++ b/minikube/state_utils/resource_size.go
@@ -13,31 +13,46 @@ import (
 func ResourceSizeConverter() schema.SchemaStateFunc {
 	return func(val interface{}) string {
-		size, ok := val.(string)
-		if !ok {
-			panic(errors.New("resource size is not a string"))
-		}
-		sizeMb, err := pkgutil.CalculateSizeInMB(size)
+		result, err := ResourceSizeConverterImpl(val)
 		if err != nil {
-			panic(errors.New("invalid resource size value"))
+			panic(err)
 		}
+		return result
+	}
+}
 
-		return strconv.Itoa(sizeMb) + "mb"
+func ResourceSizeConverterImpl(val interface{}) (string, error) {
+	size, ok := val.(string)
+	if !ok {
+		return "", errors.New("resource size is not a string")
 	}
+	sizeMb, err := pkgutil.CalculateSizeInMB(size)
+	if err != nil {
+		return "", errors.New("invalid resource size value")
+	}
+
+	return strconv.Itoa(sizeMb) + "mb", nil
 }
 
 func ResourceSizeValidator() schema.SchemaValidateDiagFunc {
 	return schema.SchemaValidateDiagFunc(func(val interface{}, path cty.Path) diag.Diagnostics {
-		size, ok := val.(string)
-		if !ok {
-			diag := diag.FromErr(errors.New("resource size is not a string"))
-			return diag
-		}
-		_, err := pkgutil.CalculateSizeInMB(size)
+		err := ResourceSizeValidatorImpl(val)
 		if err != nil {
-			diag := diag.FromErr(errors.New("invalid resource size value"))
-			return diag
+			return diag.FromErr(err)
 		}
 		return nil
+	})
 }
+
+func ResourceSizeValidatorImpl(val interface{}) error {
+	size, ok := val.(string)
+	if !ok {
+		return errors.New("resource size is not a string")
+	}
+	_, err := pkgutil.CalculateSizeInMB(size)
+	if err != nil {
+		return errors.New("invalid resource size value")
+	}
+	return nil
+}
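
For reference, a minimal sketch of a configuration that exercises the new string-typed memory and cpus attributes; the attribute names and the special "max"/"no-limit" values are taken from the test fixtures above, while the driver and cluster name are illustrative:

	resource "minikube_cluster" "example" {
	  driver       = "docker"
	  cluster_name = "example-cluster"
	  memory       = "max"   # a size such as "4g", or "max"; "no-limit" is Docker/Podman only
	  cpus         = "2"     # a count such as "2", or "max"; "no-limit" is Docker/Podman only
	}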