
Commit 2cec144

Merge branch 'main' into nullable-gitrepo-status
2 parents 2da4965 + e6df5eb commit 2cec144

13 files changed: +907 -191 lines


.github/workflows/golangci-lint.yml

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ jobs:
           export PATH=$PATH:/home/runner/go/bin/
 
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v6.3.2
+        uses: golangci/golangci-lint-action@v6.4.1
         with:
           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.61.0

.github/workflows/release.yml

Lines changed: 3 additions & 2 deletions
@@ -212,7 +212,7 @@ jobs:
           fi
 
           echo "publishing helm chart in the branch $charts_branch"
-          if ! git show-ref --quiet "refs/heads/$charts_branch"; then
+          if ! git ls-remote --exit-code --heads origin "$charts_branch"; then
             git checkout --orphan "$charts_branch"
             git rm -rf .
 
@@ -224,10 +224,11 @@ jobs:
             git add README.md LICENSE .gitignore
             git commit -m "Initial commit for $charts_branch"
           else
+            git checkout .
             git checkout "$charts_branch"
           fi
 
-          mkdir charts
+          mkdir -p charts
           find dist/ -name '*.tgz' -exec tar -xf {} -C charts/ \;
 
           git add charts/**/*

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -18,3 +18,4 @@ FleetCI-RootCA
 env.multi-cluster
 env.single-cluster
 /fossa
+benchmarks/db

benchmarks/cmd/dataset.go

Lines changed: 158 additions & 0 deletions
@@ -0,0 +1,158 @@
package main

import (
	"math"
	"strings"

	"github.com/rancher/fleet/benchmarks/cmd/parser"

	"gonum.org/v1/gonum/stat"
)

type Dataset map[string]map[string]Measurements

// Measurements contains all measurements for an experiment in the population
// and some statistics.
type Measurements struct {
	Mean   float64
	StdDev float64
	ZScore float64
	Values []float64
}

type scoresByXP map[string]scores

type scores struct {
	ZScores    []float64
	Weights    []float64
	MeanZScore float64
}

func (s scoresByXP) AvgZScores() float64 {
	zscores := []float64{}
	for _, xp := range s {
		zscores = append(zscores, xp.ZScores...)
	}

	return stat.Mean(zscores, nil)
}

func skip(name string) bool {
	switch name {
	case "GCDuration", "Mem", "MemDuring", "ResourceCount":
		return true
	}
	return strings.HasPrefix(name, "RESTClient")
}

// transformDataset takes a sample and transforms it into a dataset.
// The output is organized by experiment, for example:
//
//	{ "50-gitrepo": {
//	    "CPU": { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	    "GC":  { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	  },
//	  "50-bundle": {
//	    "CPU": { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	    "GC":  { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	  },
//	}
func transformDataset(ds Dataset, sample parser.Sample) {
	for name, experiment := range sample.Experiments {
		for measurement, value := range experiment.Measurements {
			if _, ok := ds[name]; !ok {
				ds[name] = map[string]Measurements{}
			}
			if _, ok := ds[name][measurement]; !ok {
				ds[name][measurement] = Measurements{
					Values: []float64{},
				}
			}
			tmp := ds[name][measurement]
			tmp.Values = append(tmp.Values, value.Value)
			ds[name][measurement] = tmp
		}
	}
}

// calculate computes the mean and standard deviation of each measurement in
// the population, and the z-score of the sample against them.
// This mutates dsPop and scores.
func calculate(sample *parser.Sample, dsPop Dataset, scores scoresByXP) {
	// for each experiment in the population, calculate mean and stddev
	for experiment, xp := range dsPop {
		for measurement, sg := range xp {
			mean, stddev := stat.MeanStdDev(sg.Values, nil)
			if math.IsNaN(stddev) || stddev == 0 {
				continue
			}

			if _, ok := sample.Experiments[experiment]; !ok {
				//fmt.Printf("missing experiment %s\n", name)
				continue
			}

			if _, ok := sample.Experiments[experiment].Measurements[measurement]; !ok {
				//fmt.Printf("missing measurement %s for experiments %s\n", measurement, name)
				continue
			}

			if skip(measurement) {
				continue
			}

			// calculate zscore
			m := sample.Experiments[experiment].Measurements[measurement]
			zscore := stat.StdScore(m.Value, mean, stddev)
			//fmt.Printf("zscore %s - %s %v %v %v\n", experiment, measurement, m, mean, zscore)

			// store in dsPop
			sg.Mean = mean
			sg.StdDev = stddev
			sg.ZScore = zscore
			dsPop[experiment][measurement] = sg

			// store to summarize by experiment
			xp := scores[experiment]
			xp.ZScores = append(xp.ZScores, zscore)
			xp.Weights = append(xp.Weights, weight(measurement))
			scores[experiment] = xp
		}
	}

	// Summarize experiments
	for name, xp := range scores {
		avg := stat.Mean(xp.ZScores, xp.Weights)
		xp.MeanZScore = avg
		scores[name] = xp
		//fmt.Printf("%s %v %v %v\n", name, avg, xp.ZScores, xp.Weights)
	}
}

// Some measurements have a higher volatility than others, or are duplicated.
// Only TotalDuration is used, as it is shown in the result table.
//
//	"CPU": 14.029999999999973,
//	"GCDuration": 1.9185229570000004,
//	"Mem": 4,
//	"MemDuring": 4,
//	"NetworkRX": 68288672,
//	"NetworkTX": 30662826,
//	"ReconcileErrors": 0,
//	"ReconcileRequeue": 65,
//	"ReconcileRequeueAfter": 462,
//	"ReconcileSuccess": 2329,
//	"ReconcileTime": 8153.420151420956,
//	"ResourceCount": 300,
//	"WorkqueueAdds": 2844,
//	"WorkqueueQueueDuration": 3911.157310051014,
//	"WorkqueueRetries": 527,
//	"WorkqueueWorkDuration": 8169.425508522996
func weight(name string) float64 {
	if name == "TotalDuration" {
		return 1.0
	}

	return 0.0
}

benchmarks/cmd/db.go

Lines changed: 102 additions & 0 deletions
@@ -0,0 +1,102 @@
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"github.com/rancher/fleet/benchmarks/cmd/parser"

	gm "github.com/onsi/gomega/gmeasure"

	"github.com/onsi/ginkgo/v2/types"
)

// Population has data from all JSON reports.
type Population struct {
	Samples []parser.Sample
}

// loadDB loads all the JSON files from the db folder and parses them.
// The JSON files are generated by ginkgo --json-report.
//
// To inspect them manually use jq:
//
//	jq '.[0].SpecReports.[].State' < b-2024-11-11_19:15:08.json
//	jq '.[0].SpecReports.[].ReportEntries' < b-2024-11-12_15:25:33.json
//	jq '.[0].SpecReports.[].ReportEntries.[0].Name' < b-2024-11-12_15:25:33.json
func loadDB(db string) (*Population, error) {
	files, err := filepath.Glob(db + "/*.json")
	if err != nil {
		return nil, err
	}

	pop := &Population{}

	for _, file := range files {
		if s, err := loadSampleFile(file); err != nil {
			return nil, err
		} else if s != nil {
			pop.Samples = append(pop.Samples, *s)
		}
	}

	return pop, nil
}

// loadSampleFile loads a single ginkgo JSON report file.
// It loads any measurements from the ginkgo report, with special handling
// for MemDuring and measurements that use the Before/After suffixes.
//
// To generate such a report JSON, run for example:
//
//	ginkgo run --json-report out.json ./benchmarks
func loadSampleFile(file string) (*parser.Sample, error) {
	data, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}
	reports := []types.Report{}
	err = json.Unmarshal(data, &reports)
	if err != nil {
		fmt.Printf("error: %s\n", data)
		return nil, err
	}
	if len(reports) < 1 {
		return nil, nil
	}

	r := reports[0]
	if len(r.SpecReports) < 1 {
		return nil, nil
	}
	if r.SpecReports[0].State != types.SpecStatePassed {
		return nil, nil
	}

	s := parser.Sample{
		Experiments: map[string]parser.Experiment{},
		Setup:       map[string]parser.Measurement{},
	}

	d, err := parser.NewSetup(r.SpecReports, s.Setup)
	if err != nil {
		return nil, err
	}
	s.Description = d

	total, err := parser.NewExperiments(r.SpecReports, s.Experiments)
	if err != nil {
		return nil, err
	}

	s.Setup["TotalDuration"] = parser.Measurement{
		Type:            gm.MeasurementTypeDuration,
		Style:           "{{bold}}",
		PrecisionBundle: gm.DefaultPrecisionBundle,
		Value:           total,
	}

	return &s, nil
}
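
For context, loadSampleFile consumes the report structure that ginkgo writes with --json-report: a JSON array of types.Report, each holding SpecReports with a State. The sketch below only illustrates that shape; the file name out.json is hypothetical, and the snippet merely counts passed specs instead of building parser.Sample values.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	// Hypothetical report produced by: ginkgo run --json-report out.json ./benchmarks
	data, err := os.ReadFile("out.json")
	if err != nil {
		panic(err)
	}

	// A ginkgo JSON report is a list of types.Report, one per suite.
	var reports []types.Report
	if err := json.Unmarshal(data, &reports); err != nil {
		panic(err)
	}

	passed := 0
	for _, r := range reports {
		for _, spec := range r.SpecReports {
			if spec.State == types.SpecStatePassed {
				passed++
			}
		}
	}
	fmt.Printf("passed specs in %d suite report(s): %d\n", len(reports), passed)
}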
