
Commit 3b45cdc

Migrate plugins to k8s.io/kube-scheduler/framework
Signed-off-by: Antonin Stefanutti <antonin@stefanutti.fr>
1 parent 78df594 commit 3b45cdc

36 files changed: +553 −513 lines
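
The migration is mechanical across the touched plugins: framework types that plugins previously imported from the in-tree k8s.io/kubernetes/pkg/scheduler/framework package (Status, Code, CycleState, StateData, NodeInfo) now come from the staging module k8s.io/kube-scheduler/framework, aliased as fwk throughout, while in-tree-only plumbing such as framework.NewCycleState and framework.Resource keeps its old import. A minimal sketch of the resulting split, illustrative rather than a file from this commit:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// filter shows the post-migration types in use: CycleState and NodeInfo are
// interfaces from the staging module, and statuses are built with
// fwk.NewStatus and fwk.Code constants instead of their in-tree twins.
func filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo fwk.NodeInfo) *fwk.Status {
	if nodeInfo.Node() == nil {
		return fwk.NewStatus(fwk.Unschedulable, "node not found")
	}
	return fwk.NewStatus(fwk.Success)
}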

pkg/capacityscheduling/capacity_scheduling.go

Lines changed: 84 additions & 81 deletions
Large diffs are not rendered by default.

pkg/capacityscheduling/capacity_scheduling_test.go

Lines changed: 22 additions & 21 deletions
@@ -23,6 +23,7 @@ import (
 	"testing"

 	"k8s.io/apimachinery/pkg/util/sets"
+	fwk "k8s.io/kube-scheduler/framework"

 	gocmp "github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
@@ -67,7 +68,7 @@ func TestPreFilter(t *testing.T) {
 		name          string
 		podInfos      []podInfo
 		elasticQuotas map[string]*ElasticQuotaInfo
-		expected      []framework.Code
+		expected      []fwk.Code
 	}{
 		{
 			name: "pod subjects to ElasticQuota",
@@ -89,9 +90,9 @@ func TestPreFilter(t *testing.T) {
 					},
 				},
 			},
-			expected: []framework.Code{
-				framework.Success,
-				framework.Unschedulable,
+			expected: []fwk.Code{
+				fwk.Success,
+				fwk.Unschedulable,
 			},
 		},
 		{
@@ -125,8 +126,8 @@ func TestPreFilter(t *testing.T) {
 					},
 				},
 			},
-			expected: []framework.Code{
-				framework.Unschedulable,
+			expected: []fwk.Code{
+				fwk.Unschedulable,
 			},
 		},
 		{
@@ -135,8 +136,8 @@ func TestPreFilter(t *testing.T) {
 				{podName: "ns2-p1", podNamespace: "ns2", memReq: 500},
 			},
 			elasticQuotas: map[string]*ElasticQuotaInfo{},
-			expected: []framework.Code{
-				framework.Success,
+			expected: []fwk.Code{
+				fwk.Success,
 			},
 		},
 	}
@@ -175,7 +176,7 @@ func TestPreFilter(t *testing.T) {

 			state := framework.NewCycleState()
 			for i := range pods {
-				if _, got := cs.PreFilter(context.TODO(), state, pods[i]); got.Code() != tt.expected[i] {
+				if _, got := cs.PreFilter(context.TODO(), state, pods[i], nil); got.Code() != tt.expected[i] {
 					t.Errorf("expected %v, got %v : %v", tt.expected[i], got.Code(), got.Message())
 				}
 			}
@@ -193,7 +194,7 @@ func TestPostFilter(t *testing.T) {
 		filteredNodesReader framework.NodeToStatusReader
 		elasticQuotas       map[string]*ElasticQuotaInfo
 		wantResult          *framework.PostFilterResult
-		wantStatus          *framework.Status
+		wantStatus          *fwk.Status
 	}{
 		{
 			name: "in-namespace preemption",
@@ -234,7 +235,7 @@ func TestPostFilter(t *testing.T) {
 				},
 			},
 			wantResult: framework.NewPostFilterResultWithNominatedNode("node-a"),
-			wantStatus: framework.NewStatus(framework.Success),
+			wantStatus: fwk.NewStatus(fwk.Success),
 		},
 		{
 			name: "cross-namespace preemption",
@@ -275,7 +276,7 @@ func TestPostFilter(t *testing.T) {
 				},
 			},
 			wantResult: framework.NewPostFilterResultWithNominatedNode("node-a"),
-			wantStatus: framework.NewStatus(framework.Success),
+			wantStatus: fwk.NewStatus(fwk.Success),
 		},
 		{
 			name: "without elasticQuotas",
@@ -291,7 +292,7 @@ func TestPostFilter(t *testing.T) {
 			filteredNodesReader: makeUnschedulableNodeStatusReader(),
 			elasticQuotas:       map[string]*ElasticQuotaInfo{},
 			wantResult:          framework.NewPostFilterResultWithNominatedNode("node-a"),
-			wantStatus:          framework.NewStatus(framework.Success),
+			wantStatus:          fwk.NewStatus(fwk.Success),
 		},
 	}

@@ -371,7 +372,7 @@ func TestReserve(t *testing.T) {
 		name          string
 		pods          []*v1.Pod
 		elasticQuotas map[string]*ElasticQuotaInfo
-		expectedCodes []framework.Code
+		expectedCodes []fwk.Code
 		expected      []map[string]*ElasticQuotaInfo
 	}{
 		{
@@ -395,9 +396,9 @@ func TestReserve(t *testing.T) {
 					},
 				},
 			},
-			expectedCodes: []framework.Code{
-				framework.Success,
-				framework.Success,
+			expectedCodes: []fwk.Code{
+				fwk.Success,
+				fwk.Success,
 			},
 			expected: []map[string]*ElasticQuotaInfo{
 				{
@@ -805,7 +806,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 		pod                 *v1.Pod
 		existPods           []*v1.Pod
 		nodes               []*v1.Node
-		nominatedNodeStatus *framework.Status
+		nominatedNodeStatus *fwk.Status
 		elasticQuotas       map[string]*ElasticQuotaInfo
 		expected            bool
 	}{
@@ -816,7 +817,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			nodes: []*v1.Node{
 				st.MakeNode().Name("node-a").Capacity(res).Obj(),
 			},
-			nominatedNodeStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
+			nominatedNodeStatus: fwk.NewStatus(fwk.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
 			elasticQuotas: map[string]*ElasticQuotaInfo{
 				"ns1": {
 					Namespace: "ns1",
@@ -840,7 +841,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			nodes: []*v1.Node{
 				st.MakeNode().Name("node-a").Capacity(res).Obj(),
 			},
-			nominatedNodeStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
+			nominatedNodeStatus: fwk.NewStatus(fwk.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
 			elasticQuotas: map[string]*ElasticQuotaInfo{
 				"ns1": {
 					Namespace: "ns1",
@@ -1599,7 +1600,7 @@ func TestDeletePod(t *testing.T) {

 func makeUnschedulableNodeStatusReader() *framework.NodeToStatus {
 	nodeStatusReader := framework.NewDefaultNodeToStatus()
-	nodeStatusReader.Set("node-a", framework.NewStatus(framework.Unschedulable))
+	nodeStatusReader.Set("node-a", fwk.NewStatus(fwk.Unschedulable))
 	return nodeStatusReader
 }
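
Note the extra nil argument threaded through the PreFilter calls above. This tracks a framework signature change in which PreFilter also receives the candidate nodes; the exact parameter type is an assumption here, but a hedged sketch of the call shape the tests rely on:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
)

// preFilterFn models the assumed post-migration call shape: state is the
// fwk.CycleState interface and the trailing argument (assumed to be the
// candidate node list) may be nil when the plugin does not consult it.
// The real method also returns a pre-filter result, elided here because
// the tests above discard it.
type preFilterFn func(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []fwk.NodeInfo) *fwk.Status

// codeOf runs fn the way the tests do, passing nil for the node list.
func codeOf(fn preFilterFn, state fwk.CycleState, pod *v1.Pod) fwk.Code {
	return fn(context.TODO(), state, pod, nil).Code()
}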

pkg/coscheduling/core/core.go

Lines changed: 20 additions & 19 deletions
@@ -33,6 +33,7 @@ import (
 	listerv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog/v2"
+	fwk "k8s.io/kube-scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"sigs.k8s.io/controller-runtime/pkg/client"

@@ -58,20 +59,20 @@ type PermitState struct {
 	Activate bool
 }

-func (s *PermitState) Clone() framework.StateData {
+func (s *PermitState) Clone() fwk.StateData {
 	return &PermitState{Activate: s.Activate}
 }

 // Manager defines the interfaces for PodGroup management.
 type Manager interface {
 	PreFilter(context.Context, *corev1.Pod) error
-	Permit(context.Context, *framework.CycleState, *corev1.Pod) Status
+	Permit(context.Context, fwk.CycleState, *corev1.Pod) Status
 	Unreserve(context.Context, *corev1.Pod)
 	GetPodGroup(context.Context, *corev1.Pod) (string, *v1alpha1.PodGroup)
 	GetAssignedPodCount(string) int
 	GetCreationTimestamp(context.Context, *corev1.Pod, time.Time) time.Time
 	DeletePermittedPodGroup(context.Context, string)
-	ActivateSiblings(ctx context.Context, pod *corev1.Pod, state *framework.CycleState)
+	ActivateSiblings(ctx context.Context, pod *corev1.Pod, state fwk.CycleState)
 	BackoffPodGroup(string, time.Duration)
 }
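
The PermitState change above is representative of the state handling after the migration: per-cycle data satisfies the staging fwk.StateData interface, and callers receive the fwk.CycleState interface instead of the concrete *framework.CycleState. A small sketch under the assumption that the interface keeps the familiar Write method; the key name is illustrative:

package example

import (
	fwk "k8s.io/kube-scheduler/framework"
)

// permitState mirrors the pattern above: Clone returns fwk.StateData so
// the value can live inside a cycle state.
type permitState struct {
	Activate bool
}

func (s *permitState) Clone() fwk.StateData {
	return &permitState{Activate: s.Activate}
}

// stash stores the state through the fwk.CycleState interface; the key is
// a made-up example, not the one the plugin uses.
func stash(state fwk.CycleState, s *permitState) {
	state.Write("example.com/permit-state", s)
}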

@@ -173,7 +174,7 @@ func (pgMgr *PodGroupManager) BackoffPodGroup(pgName string, backoff time.Duration) {

 // ActivateSiblings stashes the pods belonging to the same PodGroup of the given pod
 // in the given state, with a reserved key "kubernetes.io/pods-to-activate".
-func (pgMgr *PodGroupManager) ActivateSiblings(ctx context.Context, pod *corev1.Pod, state *framework.CycleState) {
+func (pgMgr *PodGroupManager) ActivateSiblings(ctx context.Context, pod *corev1.Pod, state fwk.CycleState) {
 	lh := klog.FromContext(ctx)
 	pgName := util.GetPodGroupLabel(pod)
 	if pgName == "" {
@@ -273,7 +274,7 @@ func (pgMgr *PodGroupManager) PreFilter(ctx context.Context, pod *corev1.Pod) error {
 }

 // Permit permits a pod to run, if the minMember match, it would send a signal to chan.
-func (pgMgr *PodGroupManager) Permit(ctx context.Context, state *framework.CycleState, pod *corev1.Pod) Status {
+func (pgMgr *PodGroupManager) Permit(ctx context.Context, state fwk.CycleState, pod *corev1.Pod) Status {
 	pgFullName, pg := pgMgr.GetPodGroup(ctx, pod)
 	if pgFullName == "" {
 		return PodGroupNotSpecified
@@ -363,7 +364,7 @@ func (pgMgr *PodGroupManager) GetPodGroup(ctx context.Context, pod *corev1.Pod) (string, *v1alpha1.PodGroup) {

 // CheckClusterResource checks if resource capacity of the cluster can satisfy <resourceRequest>.
 // It returns an error detailing the resource gap if not satisfied; otherwise returns nil.
-func CheckClusterResource(ctx context.Context, nodeList []*framework.NodeInfo, resourceRequest corev1.ResourceList, desiredPodGroupName string) error {
+func CheckClusterResource(ctx context.Context, nodeList []fwk.NodeInfo, resourceRequest corev1.ResourceList, desiredPodGroupName string) error {
 	for _, info := range nodeList {
 		if info == nil || info.Node() == nil {
 			continue
@@ -390,32 +391,32 @@ func GetNamespacedName(obj metav1.Object) string {
 	return fmt.Sprintf("%v/%v", obj.GetNamespace(), obj.GetName())
 }

-func getNodeResource(ctx context.Context, info *framework.NodeInfo, desiredPodGroupName string) *framework.Resource {
+func getNodeResource(ctx context.Context, info fwk.NodeInfo, desiredPodGroupName string) fwk.Resource {
 	nodeClone := info.Snapshot()
 	logger := klog.FromContext(ctx)
-	for _, podInfo := range info.Pods {
-		if podInfo == nil || podInfo.Pod == nil {
+	for _, podInfo := range info.GetPods() {
+		if podInfo == nil || podInfo.GetPod() == nil {
 			continue
 		}
-		if util.GetPodGroupFullName(podInfo.Pod) != desiredPodGroupName {
+		if util.GetPodGroupFullName(podInfo.GetPod()) != desiredPodGroupName {
 			continue
 		}
-		nodeClone.RemovePod(logger, podInfo.Pod)
+		nodeClone.RemovePod(logger, podInfo.GetPod())
 	}

 	leftResource := framework.Resource{
 		ScalarResources: make(map[corev1.ResourceName]int64),
 	}
-	allocatable := nodeClone.Allocatable
-	requested := nodeClone.Requested
+	allocatable := nodeClone.GetAllocatable()
+	requested := nodeClone.GetRequested()

-	leftResource.AllowedPodNumber = allocatable.AllowedPodNumber - len(nodeClone.Pods)
-	leftResource.MilliCPU = allocatable.MilliCPU - requested.MilliCPU
-	leftResource.Memory = allocatable.Memory - requested.Memory
-	leftResource.EphemeralStorage = allocatable.EphemeralStorage - requested.EphemeralStorage
+	leftResource.AllowedPodNumber = allocatable.GetAllowedPodNumber() - len(nodeClone.GetPods())
+	leftResource.MilliCPU = allocatable.GetMilliCPU() - requested.GetMilliCPU()
+	leftResource.Memory = allocatable.GetMemory() - requested.GetMemory()
+	leftResource.EphemeralStorage = allocatable.GetEphemeralStorage() - requested.GetEphemeralStorage()

-	for k, allocatableEx := range allocatable.ScalarResources {
-		requestEx, ok := requested.ScalarResources[k]
+	for k, allocatableEx := range allocatable.GetScalarResources() {
+		requestEx, ok := requested.GetScalarResources()[k]
 		if !ok {
 			leftResource.ScalarResources[k] = allocatableEx
 		} else {
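
The getNodeResource hunk condenses the NodeInfo side of the migration: exported fields such as Pods, Allocatable, and Requested are now reached through getters on the fwk.NodeInfo interface. A short sketch of the accessor-based arithmetic, with getter names taken from the diff and the surrounding scaffolding assumed:

package example

import (
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// leftover computes a node's remaining capacity via the interface getters
// (GetAllocatable, GetRequested, GetPods) rather than the old exported
// struct fields (Allocatable, Requested, Pods).
func leftover(info fwk.NodeInfo) framework.Resource {
	allocatable, requested := info.GetAllocatable(), info.GetRequested()
	return framework.Resource{
		MilliCPU:         allocatable.GetMilliCPU() - requested.GetMilliCPU(),
		Memory:           allocatable.GetMemory() - requested.GetMemory(),
		EphemeralStorage: allocatable.GetEphemeralStorage() - requested.GetEphemeralStorage(),
		AllowedPodNumber: allocatable.GetAllowedPodNumber() - len(info.GetPods()),
	}
}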
