/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"strconv"
	"text/tabwriter"

	"github.com/golang/glog"
	"k8s.io/kubernetes/test/e2e/perftype"
)

// MetricKey is used to identify a metric uniquely.
type MetricKey struct {
	TestName    string // Name of the test ("Load Capacity", "Density", etc.)
	Verb        string // "GET", "LIST", etc. for API calls; "Pod-Startup" for pod startup
	Resource    string // "nodes", "pods", etc. for API calls; empty for pod startup
	Subresource string // "status", "binding", etc.; empty for pod startup and most API calls
	Scope       string // Used for API calls: "resource" (for GETs), "namespace"/"cluster" (for LISTs)
	Percentile  string // The percentile string ("Perc50", "Perc90", etc.)
}
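
// For illustration only, an API-call latency metric and a pod-startup metric
// would be keyed roughly as follows (the values are hypothetical, not taken
// from a real run):
//
//	apiKey := MetricKey{"Density", "LIST", "pods", "", "namespace", "Perc90"}
//	podKey := MetricKey{"Density", "Pod-Startup", "", "", "", "Perc50"}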

// MetricComparisonData holds all the values corresponding to a metric's comparison.
type MetricComparisonData struct {
	LeftJobSample  []float64 // Sample values from the left job's runs
	RightJobSample []float64 // Sample values from the right job's runs
	Matched        bool      // Whether the samples matched
	Comments       string    // Any comments regarding the matching (for human interpretation)

	// Below are some common statistical measures that we compute for the left
	// and right job samples. They are used by some comparison schemes.
	AvgL, AvgR, AvgRatio float64 // Average (and the ratio of the right average to the left one)
	StDevL, StDevR       float64 // Standard deviation
	MaxL, MaxR           float64 // Max value
}

// JobComparisonData holds a map from each metric's key to its comparison data.
type JobComparisonData struct {
	Data map[MetricKey]*MetricComparisonData
}

// MetricFilterFunc reports whether a given metric is to be filtered out.
type MetricFilterFunc func(MetricKey, MetricComparisonData) bool

// NewJobComparisonData is a constructor for the JobComparisonData struct.
func NewJobComparisonData() *JobComparisonData {
	return &JobComparisonData{
		Data: make(map[MetricKey]*MetricComparisonData),
	}
}
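
// A typical flow through this package might look as follows. This is a sketch:
// parsing the per-run perf data is assumed to happen in the caller, and a
// comparison scheme outside this file is assumed to fill in Matched, Comments
// and AvgRatio before printing.
//
//	j := GetFlattennedComparisonData(leftRuns, rightRuns, 100)
//	j.ComputeStatsForMetricSamples()
//	// ... run a comparison scheme over j.Data ...
//	j.PrettyPrint()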

type metricKeyDataPair struct {
	metricKey  MetricKey
	metricData *MetricComparisonData
}

type metricKeyDataPairList []metricKeyDataPair

// metricKeyDataPairList implements sort.Interface, ordering entries by their
// AvgRatio in increasing order, with NaN ratios treated as smallest.
func (metricsList metricKeyDataPairList) Len() int {
	return len(metricsList)
}

func (metricsList metricKeyDataPairList) Less(i, j int) bool {
	// NaN compares false with everything, so handle it explicitly: NaNs sort
	// before any real value, and a NaN is not less than another NaN. This
	// keeps Less a strict ordering, as sort.Interface requires.
	if math.IsNaN(metricsList[i].metricData.AvgRatio) {
		return !math.IsNaN(metricsList[j].metricData.AvgRatio)
	}
	if math.IsNaN(metricsList[j].metricData.AvgRatio) {
		return false
	}
	return metricsList[i].metricData.AvgRatio < metricsList[j].metricData.AvgRatio
}

func (metricsList metricKeyDataPairList) Swap(i, j int) {
	metricsList[i], metricsList[j] = metricsList[j], metricsList[i]
}

func getMetricsSortedByAvgRatio(j *JobComparisonData) metricKeyDataPairList {
	metricsList := make(metricKeyDataPairList, len(j.Data))
	i := 0
	for metricKey, metricData := range j.Data {
		metricsList[i] = metricKeyDataPair{metricKey, metricData}
		i++
	}
	// Reverse the ascending order so that the largest ratios (the biggest
	// regressions) come first and NaN ratios sink to the bottom.
	sort.Sort(sort.Reverse(metricsList))
	return metricsList
}

// PrettyPrintWithFilter prints the job comparison data in a table with columns aligned,
// after sorting the metrics by their avg ratio and removing entries based on the filter.
func (j *JobComparisonData) PrettyPrintWithFilter(filter MetricFilterFunc) {
	metricsList := getMetricsSortedByAvgRatio(j)
	var buf bytes.Buffer
	w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0)
	fmt.Fprintf(w, "E2E TEST\tVERB\tRESOURCE\tSUBRESOURCE\tSCOPE\tPERCENTILE\tCOMMENTS\n")
	for _, metricPair := range metricsList {
		key, data := metricPair.metricKey, metricPair.metricData
		if filter(key, *data) {
			continue
		}
		fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\n", key.TestName, key.Verb, key.Resource, key.Subresource, key.Scope, key.Percentile, data.Comments)
	}
	w.Flush()
	glog.Infof("\n%v", buf.String())
}

// PrettyPrint prints the job comparison data in a table without any filtering.
func (j *JobComparisonData) PrettyPrint() {
	j.PrettyPrintWithFilter(func(_ MetricKey, _ MetricComparisonData) bool { return false })
}
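
// As an example of a custom filter, the following (hypothetical) call would
// print only the metrics whose samples did not match, since returning true
// drops an entry from the output:
//
//	j.PrettyPrintWithFilter(func(_ MetricKey, d MetricComparisonData) bool {
//		return d.Matched
//	})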

// addSampleValue adds a sample value (if not NaN) to a given metric's MetricComparisonData.
func (j *JobComparisonData) addSampleValue(sample float64, testName, verb, resource, subresource, scope, percentile string, fromLeftJob bool) {
	if math.IsNaN(sample) {
		return
	}
	// Check if the metric exists in the map already, and add it if necessary.
	metricKey := MetricKey{testName, verb, resource, subresource, scope, percentile}
	if _, ok := j.Data[metricKey]; !ok {
		j.Data[metricKey] = &MetricComparisonData{}
	}
	// Add the sample to the metric's comparison data.
	if fromLeftJob {
		j.Data[metricKey].LeftJobSample = append(j.Data[metricKey].LeftJobSample, sample)
	} else {
		j.Data[metricKey].RightJobSample = append(j.Data[metricKey].RightJobSample, sample)
	}
}

func (j *JobComparisonData) addLatencyValue(latency *perftype.DataItem, minAllowedRequestCount int, testName string, fromLeftJob bool) {
	// Discard the sample if its request count is known and below the allowed minimum.
	if latency.Labels["Count"] != "" {
		if count, err := strconv.Atoi(latency.Labels["Count"]); err != nil || count < minAllowedRequestCount {
			return
		}
	}
	verb := latency.Labels["Verb"]
	resource := latency.Labels["Resource"]
	subresource := latency.Labels["Subresource"]
	scope := latency.Labels["Scope"]
	if latency.Labels["Metric"] == "pod_startup" {
		verb = "Pod-Startup"
	}
	for percentile, value := range latency.Data {
		j.addSampleValue(value, testName, verb, resource, subresource, scope, percentile, fromLeftJob)
	}
}
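
// For reference, a perftype.DataItem carrying an API-call latency might look
// roughly like this (the label names follow the ones read above; the numbers
// are made up):
//
//	item := perftype.DataItem{
//		Data:   map[string]float64{"Perc50": 12.3, "Perc90": 45.6, "Perc99": 78.9},
//		Unit:   "ms",
//		Labels: map[string]string{"Verb": "LIST", "Resource": "pods", "Scope": "namespace", "Count": "1234"},
//	}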

// GetFlattennedComparisonData flattens latencies from various runs of the left & right jobs
// into a JobComparisonData. In the process, it also discards metric samples whose request
// count is less than minAllowedAPIRequestCount.
func GetFlattennedComparisonData(leftJobMetrics, rightJobMetrics []map[string][]perftype.PerfData, minAllowedAPIRequestCount int) *JobComparisonData {
	j := NewJobComparisonData()
	addJobMetrics := func(jobMetrics []map[string][]perftype.PerfData, fromLeftJob bool) {
		for _, singleRunMetrics := range jobMetrics {
			for testName, latenciesArray := range singleRunMetrics {
				for _, latencies := range latenciesArray {
					for _, latency := range latencies.DataItems {
						j.addLatencyValue(&latency, minAllowedAPIRequestCount, testName, fromLeftJob)
					}
				}
			}
		}
	}
	addJobMetrics(leftJobMetrics, true)
	addJobMetrics(rightJobMetrics, false)
	return j
}
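
// The expected input shape, for illustration: each slice element is one run of
// a job, mapping a test name to the perf data parsed from that run's artifacts
// (the file layout and parsing are assumed to live in the caller):
//
//	leftRuns := []map[string][]perftype.PerfData{
//		{"Density": {{Version: "v1", DataItems: []perftype.DataItem{ /* ... */ }}}},
//	}
//	j := GetFlattennedComparisonData(leftRuns, nil, 100)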

// computeSampleStats computes the average, standard deviation and max of the sample,
// writing NaN into all three if the sample is empty.
func computeSampleStats(sample []float64, avg, stDev, maxVal *float64) {
	length := len(sample)
	if length == 0 {
		*avg = math.NaN()
		*stDev = math.NaN()
		*maxVal = math.NaN()
		return
	}
	sum := 0.0
	squareSum := 0.0
	// Start from -Inf so that negative samples are handled correctly and any
	// stale value in *maxVal is ignored.
	*maxVal = math.Inf(-1)
	for i := 0; i < length; i++ {
		sum += sample[i]
		squareSum += sample[i] * sample[i]
		*maxVal = math.Max(*maxVal, sample[i])
	}
	*avg = sum / float64(length)
	// Population variance via E[X^2] - E[X]^2. Guard against tiny negative
	// values caused by floating-point rounding before taking the square root.
	*stDev = math.Sqrt(math.Max(0, squareSum/float64(length)-(*avg * *avg)))
}
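
// A quick worked example of the identity used above: for the sample {1, 2, 3},
// sum = 6 and squareSum = 14, so avg = 2 and the population variance is
// 14/3 - 2*2 = 2/3, giving a standard deviation of about 0.816.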

// ComputeStatsForMetricSamples computes avg, std-dev and max for each metric's left and right samples.
func (j *JobComparisonData) ComputeStatsForMetricSamples() {
	for _, metricData := range j.Data {
		computeSampleStats(metricData.LeftJobSample, &metricData.AvgL, &metricData.StDevL, &metricData.MaxL)
		computeSampleStats(metricData.RightJobSample, &metricData.AvgR, &metricData.StDevR, &metricData.MaxR)
	}
}