forked from kubernetes/autoscaler
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathscale_up_status_processor.go
147 lines (124 loc) · 5.31 KB
/
scale_up_status_processor.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset"
)
// ScaleUpStatus is the status of a scale-up attempt. This includes information
// on if scale-up happened, description of scale-up operation performed and
// status of pods that took part in the scale-up evaluation.
type ScaleUpStatus struct {
	// Result is the overall outcome of the attempt (see ScaleUpResult values).
	Result ScaleUpResult
	// ScaleUpError is the error that caused the attempt to fail; set together
	// with Result == ScaleUpError by UpdateScaleUpError, nil otherwise.
	ScaleUpError *errors.AutoscalerError
	// ScaleUpInfos describes the individual node group resize operations performed.
	ScaleUpInfos []nodegroupset.ScaleUpInfo
	// PodsTriggeredScaleUp are the pods that caused this scale-up to happen.
	PodsTriggeredScaleUp []*apiv1.Pod
	// PodsRemainUnschedulable are the pods that didn't trigger a scale-up,
	// with per-node-group reasons for rejection/skipping.
	PodsRemainUnschedulable []NoScaleUpInfo
	// PodsAwaitEvaluation are pods not yet evaluated in this iteration.
	PodsAwaitEvaluation []*apiv1.Pod
	// CreateNodeGroupResults describes node groups created during this scale-up.
	CreateNodeGroupResults []nodegroups.CreateNodeGroupResult
	// ConsideredNodeGroups are all node groups evaluated as scale-up candidates.
	ConsideredNodeGroups []cloudprovider.NodeGroup
	// FailedCreationNodeGroups are node groups that could not be created.
	FailedCreationNodeGroups []cloudprovider.NodeGroup
	// FailedResizeNodeGroups are node groups whose resize operation failed.
	FailedResizeNodeGroups []cloudprovider.NodeGroup
}
// NoScaleUpInfo contains information about a pod that didn't trigger scale-up.
type NoScaleUpInfo struct {
	// Pod is the unschedulable pod that failed to trigger a scale-up.
	Pod *apiv1.Pod
	// RejectedNodeGroups maps node group id to the reasons why that group
	// was evaluated and rejected for this pod.
	RejectedNodeGroups map[string]Reasons
	// SkippedNodeGroups maps node group id to the reasons why that group
	// was not evaluated for this pod at all.
	SkippedNodeGroups map[string]Reasons
}
// ScaleUpResult represents the result of a scale up.
type ScaleUpResult int

const (
	// ScaleUpSuccessful - a scale-up successfully occurred.
	ScaleUpSuccessful ScaleUpResult = iota
	// ScaleUpError - an unexpected error occurred during the scale-up attempt.
	ScaleUpError
	// ScaleUpNoOptionsAvailable - there were no node groups that could be considered for the scale-up.
	ScaleUpNoOptionsAvailable
	// ScaleUpNotNeeded - there was no need for a scale-up e.g. because there were no unschedulable pods.
	ScaleUpNotNeeded
	// ScaleUpNotTried - the scale up wasn't even attempted, e.g. an autoscaling iteration was skipped, or
	// an error occurred before the scale up logic.
	ScaleUpNotTried
	// ScaleUpInCooldown - the scale up wasn't even attempted, because it's in a cooldown state (it's suspended for a scheduled period of time).
	ScaleUpInCooldown
	// ScaleUpLimitedByMaxNodesTotal - the scale up wasn't attempted, because the cluster reached max nodes total.
	ScaleUpLimitedByMaxNodesTotal
)
// WasSuccessful reports whether the scale-up attempt completed successfully.
func (s *ScaleUpStatus) WasSuccessful() bool {
	succeeded := s.Result == ScaleUpSuccessful
	return succeeded
}
// Reasons interface provides a list of reasons for why something happened or didn't happen.
type Reasons interface {
	// Reasons returns the list of human-readable reason strings.
	Reasons() []string
}
// ScaleUpStatusProcessor processes the status of the cluster after a scale-up.
type ScaleUpStatusProcessor interface {
	// Process is called after each scale-up attempt with the resulting status.
	Process(context *context.AutoscalingContext, status *ScaleUpStatus)
	// CleanUp cleans up the processor's internal structures.
	CleanUp()
}
// NewDefaultScaleUpStatusProcessor creates a default instance of ScaleUpStatusProcessor.
// The default implementation emits Kubernetes events describing the scale-up status.
func NewDefaultScaleUpStatusProcessor() ScaleUpStatusProcessor {
	return new(EventingScaleUpStatusProcessor)
}
// NoOpScaleUpStatusProcessor is a ScaleUpStatusProcessor implementations useful for testing.
// It ignores every status update and holds no state.
type NoOpScaleUpStatusProcessor struct{}
// Process processes the status of the cluster after a scale-up.
// This no-op implementation intentionally does nothing.
func (p *NoOpScaleUpStatusProcessor) Process(context *context.AutoscalingContext, status *ScaleUpStatus) {
}
// CleanUp cleans up the processor's internal structures.
// This no-op implementation has no state, so there is nothing to clean up.
func (p *NoOpScaleUpStatusProcessor) CleanUp() {
}
// CombinedScaleUpStatusProcessor is a list of ScaleUpStatusProcessor
// invoked sequentially, in order of addition, on every Process/CleanUp call.
type CombinedScaleUpStatusProcessor struct {
	// processors holds the non-nil sub-processors, in invocation order.
	processors []ScaleUpStatusProcessor
}
// NewCombinedScaleUpStatusProcessor construct CombinedScaleUpStatusProcessor.
// Nil entries in the provided slice are silently dropped.
func NewCombinedScaleUpStatusProcessor(processors []ScaleUpStatusProcessor) *CombinedScaleUpStatusProcessor {
	var filtered []ScaleUpStatusProcessor
	for i := range processors {
		if processors[i] == nil {
			continue
		}
		filtered = append(filtered, processors[i])
	}
	return &CombinedScaleUpStatusProcessor{processors: filtered}
}
// AddProcessor append processor to the list.
// A nil processor is ignored.
func (p *CombinedScaleUpStatusProcessor) AddProcessor(processor ScaleUpStatusProcessor) {
	if processor == nil {
		return
	}
	p.processors = append(p.processors, processor)
}
// Process runs sub-processors sequentially in the same order of addition.
func (p *CombinedScaleUpStatusProcessor) Process(ctx *context.AutoscalingContext, status *ScaleUpStatus) {
	for i := range p.processors {
		p.processors[i].Process(ctx, status)
	}
}
// CleanUp cleans up the processor's internal structures by delegating
// to each sub-processor in order of addition.
func (p *CombinedScaleUpStatusProcessor) CleanUp() {
	for i := range p.processors {
		p.processors[i].CleanUp()
	}
}
// UpdateScaleUpError updates ScaleUpStatus to record a failed attempt:
// it sets Result to ScaleUpError and stores err, then returns the status
// and the error back to the caller for convenient chaining.
func UpdateScaleUpError(s *ScaleUpStatus, err errors.AutoscalerError) (*ScaleUpStatus, errors.AutoscalerError) {
	s.Result = ScaleUpError
	s.ScaleUpError = &err
	return s, err
}