@@ -31,14 +31,15 @@ import (
 type ExistingNode struct {
 	*state.StateNode
 	cachedAvailable v1.ResourceList // Cache so we don't have to re-subtract resources on the StateNode every time
+	cachedTaints    []v1.Taint      // Cache so we don't have to re-construct the taints each time we attempt to schedule a pod
 
 	Pods         []*v1.Pod
 	topology     *Topology
 	requests     v1.ResourceList
 	requirements scheduling.Requirements
 }
 
-func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode {
+func NewExistingNode(n *state.StateNode, topology *Topology, taints []v1.Taint, daemonResources v1.ResourceList) *ExistingNode {
 	// The state node passed in here must be a deep copy from cluster state as we modify it
 	// the remaining daemonResources to schedule are the total daemonResources minus what has already scheduled
 	remainingDaemonResources := resources.Subtract(daemonResources, n.DaemonSetRequests())
@@ -54,6 +55,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
 	node := &ExistingNode{
 		StateNode:       n,
 		cachedAvailable: n.Available(),
+		cachedTaints:    taints,
 		topology:        topology,
 		requests:        remainingDaemonResources,
 		requirements:    scheduling.NewLabelRequirements(n.Labels()),
@@ -65,7 +67,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
 
 func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod, podRequests v1.ResourceList) error {
 	// Check Taints
-	if err := scheduling.Taints(n.Taints()).Tolerates(pod); err != nil {
+	if err := scheduling.Taints(n.cachedTaints).Tolerates(pod); err != nil {
 		return err
 	}
 	// determine the volumes that will be mounted if the pod schedules
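For context on the pattern this diff introduces: the node's taint list is now built once, when the ExistingNode is constructed, and reused on every Add call, instead of being recomputed for each pod the scheduler tries to place. Below is a minimal, self-contained Go sketch of that construct-once, reuse-per-pod caching idea using only upstream k8s.io/api/core/v1 types. The simNode type and its tolerates helper are hypothetical stand-ins for illustration, not Karpenter APIs; in the real code the check goes through scheduling.Taints(n.cachedTaints).Tolerates(pod) as shown above.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// simNode mirrors the idea behind ExistingNode.cachedTaints: the taint slice
// is computed once at construction and reused on every scheduling attempt.
// (Hypothetical type for illustration, not part of Karpenter.)
type simNode struct {
	cachedTaints []v1.Taint
}

// tolerates reports whether the pod tolerates every cached taint, using the
// upstream Toleration.ToleratesTaint helper. This is a simplification of the
// real tolerance logic, which also accounts for taint effects.
func (n *simNode) tolerates(pod *v1.Pod) error {
	for i := range n.cachedTaints {
		taint := n.cachedTaints[i]
		tolerated := false
		for _, tol := range pod.Spec.Tolerations {
			if tol.ToleratesTaint(&taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return fmt.Errorf("pod does not tolerate taint %s=%s:%s", taint.Key, taint.Value, taint.Effect)
		}
	}
	return nil
}

func main() {
	// Build the taints once, up front, as NewExistingNode now receives them.
	node := &simNode{cachedTaints: []v1.Taint{
		{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule},
	}}

	pod := &v1.Pod{Spec: v1.PodSpec{Tolerations: []v1.Toleration{
		{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "gpu", Effect: v1.TaintEffectNoSchedule},
	}}}

	// Every scheduling attempt reuses the cached slice; nothing is rebuilt per pod.
	fmt.Println("tolerated:", node.tolerates(pod) == nil)
}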