Merged
17 commits

e30cb2d  implement no-limit and max for memory (scott-the-programmer, Feb 20, 2025)
ce7d2c6  remove unused transform/validate functions (scott-the-programmer, Feb 20, 2025)
7980799  update test output (scott-the-programmer, Feb 20, 2025)
664a9da  add tests for memory util (scott-the-programmer, Feb 20, 2025)
dd3eb7e  regenerate schema (scott-the-programmer, Feb 20, 2025)
47f5406  ensure quotes are escaped during build (scott-the-programmer, Feb 20, 2025)
0f3383c  revert cpu type temporarily (scott-the-programmer, Feb 20, 2025)
ccf8e9f  feat: Add CPU max and no-limit support with new state utility function (scott-the-programmer, Feb 28, 2025)
1825964  fix: Parse CPU string to integer in GetCPUs function (scott-the-programmer, Feb 28, 2025)
0781c81  fix: Handle negative CPU count and error cases in GetCPUs function (scott-the-programmer, Feb 28, 2025)
f7483cd  chore: Import fmt package for potential error formatting in CPU utility (scott-the-programmer, Mar 9, 2025)
adb77cb  feat: Add memory converter and validator utility functions with compr… (scott-the-programmer, Mar 9, 2025)
6f40a32  feat: Update minikube cluster schema with memory and CPU type improve… (scott-the-programmer, Mar 9, 2025)
8f98e37  test: Add unit tests for memory and CPU configuration scenarios in mi… (scott-the-programmer, Mar 9, 2025)
e9d726e  test: Update test configurations for memory and CPU limit scenarios (scott-the-programmer, Mar 9, 2025)
19b12d3  test: Enhance cluster tests with memory and CPU configurations, integ… (scott-the-programmer, Mar 16, 2025)
3eb956e  Merge branch 'main' into max-no-limit (scott-the-programmer, Mar 16, 2025)
14 changes: 8 additions & 6 deletions minikube/generator/schema_builder.go
@@ -58,10 +58,10 @@ var updateFields = []string{
var schemaOverrides map[string]SchemaOverride = map[string]SchemaOverride{
"memory": {
Default: "4g",
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \\\"max\\\" to use the maximum amount of memory. Use \\\"no-limit\\\" to not specify a limit (Docker/Podman only))",
Type: String,
StateFunc: "state_utils.ResourceSizeConverter()",
ValidateDiagFunc: "state_utils.ResourceSizeValidator()",
StateFunc: "state_utils.MemoryConverter()",
ValidateDiagFunc: "state_utils.MemoryValidator()",
},
"disk_size": {
Default: "20000mb",
@@ -71,9 +71,11 @@ var schemaOverrides map[string]SchemaOverride = map[string]SchemaOverride{
ValidateDiagFunc: "state_utils.ResourceSizeValidator()",
},
"cpus": {
Default: "2",
Description: "Amount of CPUs to allocate to Kubernetes",
Type: Int,
Default: "2",
Description: "Number of CPUs allocated to Kubernetes. Use \\\"max\\\" to use the maximum number of CPUs. Use \\\"no-limit\\\" to not specify a limit (Docker/Podman only)",
Type: String,
StateFunc: "state_utils.CPUConverter()",
ValidateDiagFunc: "state_utils.CPUValidator()",
},
// Customize the description to be the full set of drivers
"driver": {
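The MemoryConverter() and MemoryValidator() functions referenced above live in the state_utils package, which this diff does not include. A minimal sketch of what the validator could look like, assuming it accepts the two keywords and otherwise defers to minikube's CalculateSizeInMB (the same helper the provider already uses for disk_size):

package state_utils

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	pkgutil "k8s.io/minikube/pkg/util"
)

// MemoryValidator (sketch): accept "max" and "no-limit", otherwise require
// a size string that minikube can parse, e.g. "4g" or "4096mb".
func MemoryValidator() schema.SchemaValidateDiagFunc {
	return func(v interface{}, _ cty.Path) diag.Diagnostics {
		val, ok := v.(string)
		if !ok {
			return diag.FromErr(fmt.Errorf("expected a string, got %T", v))
		}
		if val == "max" || val == "no-limit" {
			return nil
		}
		if _, err := pkgutil.CalculateSizeInMB(val); err != nil {
			return diag.FromErr(fmt.Errorf("invalid memory value %q: %v", val, err))
		}
		return nil
	}
}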
6 changes: 3 additions & 3 deletions minikube/generator/schema_builder_test.go
@@ -360,14 +360,14 @@ func TestOverride(t *testing.T) {
assert.Equal(t, header+`
"memory": {
Type: schema.TypeString,
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \"max\" to use the maximum amount of memory. Use \"no-limit\" to not specify a limit (Docker/Podman only))",

Optional: true,
ForceNew: true,

Default: "4g",
StateFunc: state_utils.ResourceSizeConverter(),
ValidateDiagFunc: state_utils.ResourceSizeValidator(),
StateFunc: state_utils.MemoryConverter(),
ValidateDiagFunc: state_utils.MemoryValidator(),
},

}
32 changes: 32 additions & 0 deletions minikube/lib/memory.go
@@ -0,0 +1,32 @@
package lib

import (
"k8s.io/minikube/pkg/minikube/machine"
)

var NoLimit = "no-limit"
var Max = "max"

// MemoryInfo holds system memory information
type MemoryInfo struct {
SystemMemory int
}

// GetMemoryLimit returns the amount of memory available to the system
// The return value is in MiB
func GetMemoryLimit() (*MemoryInfo, error) {
info, _, memErr, _ := machine.LocalHostInfo()

if memErr != nil {
return nil, memErr
}

// Subtract 1 GiB for overhead
memInfo := &MemoryInfo{
SystemMemory: int(info.Memory) - 1024,
}

return memInfo, nil
}
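GetMemoryLimit gives the provider a concrete number to substitute when a user asks for "max". A hypothetical usage illustration (resolveMaxMemory is not part of the PR):

// resolveMaxMemory shows how the "max" keyword can be turned into a
// concrete MiB value using GetMemoryLimit.
func resolveMaxMemory() (int, error) {
	info, err := GetMemoryLimit()
	if err != nil {
		return 0, err
	}
	// SystemMemory already has 1 GiB subtracted for overhead
	return info.SystemMemory, nil
}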

10 changes: 8 additions & 2 deletions minikube/resource_cluster.go
@@ -271,7 +271,13 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
}

memoryStr := d.Get("memory").(string)
memoryMb, err := pkgutil.CalculateSizeInMB(memoryStr)
memoryMb, err := state_utils.GetMemory(memoryStr)
if err != nil {
return nil, err
}

cpuStr := d.Get("cpus").(string)
cpus, err := state_utils.GetCPUs(cpuStr)
if err != nil {
return nil, err
}
@@ -380,7 +386,7 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
KicBaseImage: d.Get("base_image").(string),
Network: d.Get("network").(string),
Memory: memoryMb,
CPUs: d.Get("cpus").(int),
CPUs: cpus,
DiskSize: diskMb,
Driver: driver,
ListenAddress: d.Get("listen_address").(string),
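state_utils.GetMemory and state_utils.GetCPUs are not shown in this diff. Going by the commit messages ("Parse CPU string to integer", "Handle negative CPU count and error cases"), a plausible sketch of GetCPUs follows; the keyword mappings ("no-limit" to 0, "max" to the host CPU count) are assumptions, not confirmed by the PR:

package state_utils

import (
	"fmt"
	"runtime"
	"strconv"
)

// GetCPUs (sketch): translate the cpus string from the schema into the
// integer that minikube's cluster config expects.
func GetCPUs(cpuStr string) (int, error) {
	switch cpuStr {
	case "no-limit":
		return 0, nil // assumed sentinel for "no limit"
	case "max":
		return runtime.NumCPU(), nil // assumed: all host CPUs
	default:
		cpus, err := strconv.Atoi(cpuStr)
		if err != nil {
			return 0, fmt.Errorf("invalid cpus value %q: %v", cpuStr, err)
		}
		if cpus <= 0 {
			return 0, fmt.Errorf("cpus must be positive, got %d", cpus)
		}
		return cpus, nil
	}
}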
121 changes: 111 additions & 10 deletions minikube/resource_cluster_test.go
@@ -15,6 +15,7 @@ import (
"time"

"github.com/scott-the-programmer/terraform-provider-minikube/minikube/lib"
"github.com/scott-the-programmer/terraform-provider-minikube/minikube/state_utils"

"github.com/golang/mock/gomock"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
@@ -35,12 +36,14 @@ type mockClusterClientProperties struct {
haNodes int
workerNodes int
diskSize int
memory string
cpu string
}

func TestClusterCreation(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreation", 1, 0, 20000}))},
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreation", 1, 0, 20000, "4096mb", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterConfig("some_driver", "TestClusterCreation"),
@@ -55,7 +58,7 @@
func TestClusterUpdate(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockUpdate(mockClusterClientProperties{t, "TestClusterUpdate", 1, 0, 20000}))},
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockUpdate(mockClusterClientProperties{t, "TestClusterUpdate", 1, 0, 20000, "4096mb", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterConfig("some_driver", "TestClusterUpdate"),
@@ -73,7 +76,7 @@
func TestClusterHA(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationHA", 3, 5, 20000}))},
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationHA", 3, 5, 20000, "4096mb", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterHAConfig("some_driver", "TestClusterCreationHA"),
@@ -85,7 +88,7 @@
func TestClusterDisk(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationDisk", 1, 0, 20480}))},
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationDisk", 1, 0, 20480, "4096mb", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterDiskConfig("some_driver", "TestClusterCreationDisk"),
@@ -97,7 +100,7 @@
func TestClusterWait(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0, 20000}))},
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0, 20000, "4096mb", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterWaitConfig("some_driver", "TestClusterCreationWait"),
@@ -326,10 +329,58 @@ func TestClusterCreation_HyperV(t *testing.T) {
})
}

func TestClusterNoLimitMemory(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterNoLimitMemory", 1, 0, 20000, "no-limit", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterNoLimitMemoryConfig("some_driver", "TestClusterNoLimitMemory"),
},
},
})
}

func TestClusterMaxMemory(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterMaxMemory", 1, 0, 20000, "max", "1"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterMaxMemoryConfig("some_driver", "TestClusterMaxMemory"),
},
},
})
}

func TestClusterNoLimitCPU(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterNoLimitCPU", 1, 0, 20000, "4096mb", "no-limit"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterNoLimitCPUConfig("some_driver", "TestClusterNoLimitCPU"),
},
},
})
}

func TestClusterMaxCPU(t *testing.T) {
resource.Test(t, resource.TestCase{
IsUnitTest: true,
Providers: map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterMaxCPU", 1, 0, 20000, "4096mb", "max"}))},
Steps: []resource.TestStep{
{
Config: testUnitClusterMaxCPUConfig("some_driver", "TestClusterMaxCPU"),
},
},
})
}

func mockUpdate(props mockClusterClientProperties) schema.ConfigureContextFunc {
ctrl := gomock.NewController(props.t)

mockClusterClient := getBaseMockClient(ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize)
mockClusterClient := getBaseMockClient(props.t, ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize, props.memory, props.cpu)

gomock.InOrder(
mockClusterClient.EXPECT().
@@ -366,7 +417,7 @@ func mockUpdate(props mockClusterClientProperties) schema.ConfigureContextFunc {
func mockSuccess(props mockClusterClientProperties) schema.ConfigureContextFunc {
ctrl := gomock.NewController(props.t)

mockClusterClient := getBaseMockClient(ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize)
mockClusterClient := getBaseMockClient(props.t, ctrl, props.name, props.haNodes, props.workerNodes, props.diskSize, props.memory, props.cpu)

mockClusterClient.EXPECT().
GetAddons().
@@ -384,7 +435,7 @@ func mockSuccess(props mockClusterClientProperties) schema.ConfigureContextFunc
return configureContext
}

func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int, workerNodes int, diskSize int) *lib.MockClusterClient {
func getBaseMockClient(t *testing.T, ctrl *gomock.Controller, clusterName string, haNodes int, workerNodes int, diskSize int, memory string, cpu string) *lib.MockClusterClient {
mockClusterClient := lib.NewMockClusterClient(ctrl)

os.Mkdir("test_output", 0755)
@@ -424,6 +475,16 @@ func getBaseMockClient(ctrl *gomock.Controller, clusterName string, haNodes int,
Worker: true,
}

mem, err := state_utils.GetMemory(memory)
if err != nil {
t.Fatalf("Failed to get memory: %v", err)
}

c, err := state_utils.GetCPUs(cpu)
if err != nil {
t.Fatalf("Failed to get cpu: %v", err)
}

cc := config.ClusterConfig{
Name: "terraform-provider-minikube-acc",
APIServerPort: clusterSchema["apiserver_port"].Default.(int),
@@ -432,8 +493,8 @@
MinikubeISO: defaultIso,
KicBaseImage: clusterSchema["base_image"].Default.(string),
Network: clusterSchema["network"].Default.(string),
Memory: 4096,
CPUs: 2,
Memory: mem,
CPUs: c,
DiskSize: diskSize,
Driver: "some_driver",
ListenAddress: clusterSchema["listen_address"].Default.(string),
@@ -827,3 +888,43 @@ func testPropertyExists(n string, id string) resource.TestCheckFunc {
return nil
}
}

func testUnitClusterNoLimitMemoryConfig(driver string, clusterName string) string {
return fmt.Sprintf(`
resource "minikube_cluster" "new" {
driver = "%s"
cluster_name = "%s"
memory = "no-limit"
}
`, driver, clusterName)
}

func testUnitClusterMaxMemoryConfig(driver string, clusterName string) string {
return fmt.Sprintf(`
resource "minikube_cluster" "new" {
driver = "%s"
cluster_name = "%s"
memory = "max"
}
`, driver, clusterName)
}

func testUnitClusterNoLimitCPUConfig(driver string, clusterName string) string {
return fmt.Sprintf(`
resource "minikube_cluster" "new" {
driver = "%s"
cluster_name = "%s"
cpus = "no-limit"
}
`, driver, clusterName)
}

func testUnitClusterMaxCPUConfig(driver string, clusterName string) string {
return fmt.Sprintf(`
resource "minikube_cluster" "new" {
driver = "%s"
cluster_name = "%s"
cpus = "max"
}
`, driver, clusterName)
}
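The new tests exercise "max" and "no-limit" for memory and CPUs separately; a config helper combining both would follow the same pattern (hypothetical, not part of the PR):

func testUnitClusterMaxMemoryNoLimitCPUConfig(driver string, clusterName string) string {
	return fmt.Sprintf(`
resource "minikube_cluster" "new" {
	driver = "%s"
	cluster_name = "%s"
	memory = "max"
	cpus = "no-limit"
}
`, driver, clusterName)
}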
20 changes: 11 additions & 9 deletions minikube/schema_cluster.go
@@ -137,7 +137,7 @@ var (
Optional: true,
ForceNew: true,

Default: "gcr.io/k8s-minikube/kicbase:v0.0.45@sha256:81df288595202a317b1a4dc2506ca2e4ed5f22373c19a441b88cfbf4b9867c85",
Default: "gcr.io/k8s-minikube/kicbase:v0.0.46@sha256:fd2d445ddcc33ebc5c6b68a17e6219ea207ce63c005095ea1525296da2d1a279",
},

"binary_mirror": {
@@ -191,13 +191,15 @@
},

"cpus": {
Type: schema.TypeInt,
Description: "Amount of CPUs to allocate to Kubernetes",
Type: schema.TypeString,
Description: "Number of CPUs allocated to Kubernetes. Use \"max\" to use the maximum number of CPUs. Use \"no-limit\" to not specify a limit (Docker/Podman only)",

Optional: true,
ForceNew: true,

Default: 2,
Default: "2",
StateFunc: state_utils.CPUConverter(),
ValidateDiagFunc: state_utils.CPUValidator(),
},

"cri_socket": {
@@ -413,7 +415,7 @@

"gpus": {
Type: schema.TypeString,
Description: "Allow pods to use your NVIDIA GPUs. Options include: [all,nvidia] (Docker driver with Docker container-runtime only)",
Description: "Allow pods to use your GPUs. Options include: [all,nvidia,amd] (Docker driver with Docker container-runtime only)",

Optional: true,
ForceNew: true,
@@ -598,7 +600,7 @@

"kubernetes_version": {
Type: schema.TypeString,
Description: "The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.31.0, 'latest' for v1.31.0). Defaults to 'stable'.",
Description: "The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.32.0, 'latest' for v1.32.0). Defaults to 'stable'.",

Optional: true,
ForceNew: true,
@@ -668,14 +670,14 @@

"memory": {
Type: schema.TypeString,
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>(case-insensitive)], where unit = b, k, kb, m, mb, g or gb)",
Description: "Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g). Use \"max\" to use the maximum amount of memory. Use \"no-limit\" to not specify a limit (Docker/Podman only))",

Optional: true,
ForceNew: true,

Default: "4g",
StateFunc: state_utils.ResourceSizeConverter(),
ValidateDiagFunc: state_utils.ResourceSizeValidator(),
StateFunc: state_utils.MemoryConverter(),
ValidateDiagFunc: state_utils.MemoryValidator(),
},

"mount": {
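The CPUConverter() and MemoryConverter() state functions referenced in this schema are likewise defined in state_utils and not shown in the diff. One plausible shape for MemoryConverter, assuming it normalizes sizes to a canonical "<n>mb" form so that "4g" and "4096mb" compare equal in state, with the keywords passed through untouched:

package state_utils

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	pkgutil "k8s.io/minikube/pkg/util"
)

// MemoryConverter (sketch): pass "max"/"no-limit" through unchanged and
// normalize everything else to "<n>mb". Invalid input is returned as-is
// and left for the validator to reject.
func MemoryConverter() schema.SchemaStateFunc {
	return func(v interface{}) string {
		val, ok := v.(string)
		if !ok {
			return fmt.Sprintf("%v", v)
		}
		if val == "max" || val == "no-limit" {
			return val
		}
		mb, err := pkgutil.CalculateSizeInMB(val)
		if err != nil {
			return val
		}
		return fmt.Sprintf("%dmb", mb)
	}
}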