Commit c221747

perf: Cache taints for existing nodes (#1827)
1 parent 8ce869c · commit c221747

2 files changed: +7 -4 lines changed

pkg/controllers/provisioning/scheduling/existingnode.go

Lines changed: 4 additions & 2 deletions

@@ -31,14 +31,15 @@ import (
 type ExistingNode struct {
 	*state.StateNode
 	cachedAvailable v1.ResourceList // Cache so we don't have to re-subtract resources on the StateNode every time
+	cachedTaints    []v1.Taint      // Cache so we don't have to re-construct the taints each time we attempt to schedule a pod

 	Pods         []*v1.Pod
 	topology     *Topology
 	requests     v1.ResourceList
 	requirements scheduling.Requirements
 }

-func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode {
+func NewExistingNode(n *state.StateNode, topology *Topology, taints []v1.Taint, daemonResources v1.ResourceList) *ExistingNode {
 	// The state node passed in here must be a deep copy from cluster state as we modify it
 	// the remaining daemonResources to schedule are the total daemonResources minus what has already scheduled
 	remainingDaemonResources := resources.Subtract(daemonResources, n.DaemonSetRequests())
@@ -54,6 +55,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
 	node := &ExistingNode{
 		StateNode:       n,
 		cachedAvailable: n.Available(),
+		cachedTaints:    taints,
 		topology:        topology,
 		requests:        remainingDaemonResources,
 		requirements:    scheduling.NewLabelRequirements(n.Labels()),
@@ -65,7 +67,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.

 func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod, podRequests v1.ResourceList) error {
 	// Check Taints
-	if err := scheduling.Taints(n.Taints()).Tolerates(pod); err != nil {
+	if err := scheduling.Taints(n.cachedTaints).Tolerates(pod); err != nil {
 		return err
 	}
 	// determine the volumes that will be mounted if the pod schedules
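
The hot-path change in this file is the Tolerates check in Add, which now reads the precomputed cachedTaints slice instead of rebuilding the taints via n.Taints() on every pod placement attempt. A minimal, self-contained sketch of that pattern follows; the stand-in types and helper names here are hypothetical and only mirror what the diff shows, not Karpenter's actual API:

package main

import "fmt"

// Taint and Pod are stand-ins for the corev1 types; only the fields the
// sketch needs are included.
type Taint struct{ Key, Value, Effect string }

type Pod struct{ Tolerations map[string]bool }

// buildTaints stands in for an expensive per-call derivation such as
// StateNode.Taints(), which reconstructs the slice on every invocation.
func buildTaints(raw []Taint) []Taint {
	out := make([]Taint, len(raw))
	copy(out, raw)
	return out
}

// ExistingNode caches the taints at construction time, mirroring the
// cachedTaints field added in this commit, so Add never rebuilds them.
type ExistingNode struct {
	cachedTaints []Taint
}

func NewExistingNode(raw []Taint) *ExistingNode {
	return &ExistingNode{cachedTaints: buildTaints(raw)}
}

// Add checks tolerations against the cached slice instead of recomputing it.
func (n *ExistingNode) Add(p *Pod) error {
	for _, t := range n.cachedTaints {
		if !p.Tolerations[t.Key] {
			return fmt.Errorf("pod does not tolerate taint %q", t.Key)
		}
	}
	return nil
}

func main() {
	n := NewExistingNode([]Taint{{Key: "dedicated", Effect: "NoSchedule"}})
	fmt.Println(n.Add(&Pod{Tolerations: map[string]bool{"dedicated": true}})) // <nil>
	fmt.Println(n.Add(&Pod{}))                                                // pod does not tolerate taint "dedicated"
}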

pkg/controllers/provisioning/scheduling/scheduler.go

Lines changed: 3 additions & 2 deletions

@@ -318,17 +318,18 @@ func (s *Scheduler) calculateExistingNodeClaims(stateNodes []*state.StateNode, d
 	// create our existing nodes
 	for _, node := range stateNodes {
 		// Calculate any daemonsets that should schedule to the inflight node
+		taints := node.Taints()
 		var daemons []*corev1.Pod
 		for _, p := range daemonSetPods {
-			if err := scheduling.Taints(node.Taints()).Tolerates(p); err != nil {
+			if err := scheduling.Taints(taints).Tolerates(p); err != nil {
 				continue
 			}
 			if err := scheduling.NewLabelRequirements(node.Labels()).Compatible(scheduling.NewPodRequirements(p)); err != nil {
 				continue
 			}
 			daemons = append(daemons, p)
 		}
-		s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, resources.RequestsForPods(daemons...)))
+		s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, taints, resources.RequestsForPods(daemons...)))

 		// We don't use the status field and instead recompute the remaining resources to ensure we have a consistent view
 		// of the cluster during scheduling. Depending on how node creation falls out, this will also work for cases where
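
In the scheduler, node.Taints() is hoisted out of the inner daemonset loop: it was previously re-evaluated for every (node, daemonset pod) pair, and now runs once per node, with the same slice then handed to NewExistingNode. A minimal sketch of the effect, using hypothetical helper names rather than the real scheduler types:

package main

import "fmt"

var taintCalls int // counts how often the "expensive" taint derivation runs

// expensiveTaints stands in for StateNode.Taints().
func expensiveTaints(node string) []string {
	taintCalls++
	return []string{node + ":dedicated=NoSchedule"}
}

// tolerates stands in for scheduling.Taints(...).Tolerates(pod).
func tolerates(taints []string, pod string) bool { return true }

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	daemonSetPods := []string{"kube-proxy", "cni", "node-exporter", "log-agent"}

	// Before this commit: taints rebuilt for every (node, pod) pair.
	taintCalls = 0
	for _, n := range nodes {
		for _, p := range daemonSetPods {
			_ = tolerates(expensiveTaints(n), p)
		}
	}
	fmt.Println("without hoisting:", taintCalls, "calls") // 12

	// After this commit: computed once per node, reused for every pod
	// (and passed to the ExistingNode constructor as well).
	taintCalls = 0
	for _, n := range nodes {
		taints := expensiveTaints(n)
		for _, p := range daemonSetPods {
			_ = tolerates(taints, p)
		}
	}
	fmt.Println("with hoisting:", taintCalls, "calls") // 3
}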
