Skip to content

Commit

Permalink
Merge branch 'main' into nullable-gitrepo-status
Browse files Browse the repository at this point in the history
  • Loading branch information
cvalerio-va authored Feb 17, 2025
2 parents 2da4965 + e6df5eb commit 2cec144
Show file tree
Hide file tree
Showing 13 changed files with 907 additions and 191 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/golangci-lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ jobs:
export PATH=$PATH:/home/runner/go/bin/
- name: golangci-lint
uses: golangci/golangci-lint-action@v6.3.2
uses: golangci/golangci-lint-action@v6.4.1
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.61.0
Expand Down
5 changes: 3 additions & 2 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ jobs:
fi
echo "publishing helm chart in the branch $charts_branch"
if ! git show-ref --quiet "refs/heads/$charts_branch"; then
if ! git ls-remote --exit-code --heads origin "$charts_branch"; then
git checkout --orphan "$charts_branch"
git rm -rf .
Expand All @@ -224,10 +224,11 @@ jobs:
git add README.md LICENSE .gitignore
git commit -m "Initial commit for $charts_branch"
else
git checkout .
git checkout "$charts_branch"
fi
mkdir charts
mkdir -p charts
find dist/ -name '*.tgz' -exec tar -xf {} -C charts/ \;
git add charts/**/*
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,4 @@ FleetCI-RootCA
env.multi-cluster
env.single-cluster
/fossa
benchmarks/db
158 changes: 158 additions & 0 deletions benchmarks/cmd/dataset.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
package main

import (
"math"
"strings"

"github.com/rancher/fleet/benchmarks/cmd/parser"

"gonum.org/v1/gonum/stat"
)

// Dataset maps experiment names to their measurements, keyed by
// measurement name, e.g. ds["50-gitrepo"]["CPU"].
type Dataset map[string]map[string]Measurements

// Measurements contains all measurements for an experiment in the population
// and some statistics.
type Measurements struct {
	Mean   float64   // arithmetic mean of Values, filled in by calculate
	StdDev float64   // standard deviation of Values, filled in by calculate
	ZScore float64   // standard score of the current sample, filled in by calculate
	Values []float64 // raw values collected from all samples in the population
}

// scoresByXP groups z-score summaries by experiment name.
type scoresByXP map[string]scores

// scores collects the z-scores of one experiment's measurements, the
// per-measurement weights and the resulting weighted mean.
type scores struct {
	ZScores    []float64 // one z-score per non-skipped measurement
	Weights    []float64 // weight for each z-score, see weight()
	MeanZScore float64   // weighted mean of ZScores, filled in by calculate
}

// AvgZScores returns the unweighted mean over the z-scores of all
// experiments in s.
func (s scoresByXP) AvgZScores() float64 {
	var all []float64
	for _, xp := range s {
		all = append(all, xp.ZScores...)
	}

	return stat.Mean(all, nil)
}

// skip reports whether the named measurement is excluded from z-score
// aggregation: a fixed set of noisy or duplicated metrics, plus every
// RESTClient* counter.
func skip(name string) bool {
	if strings.HasPrefix(name, "RESTClient") {
		return true
	}

	switch name {
	case "GCDuration", "Mem", "MemDuring", "ResourceCount":
		return true
	default:
		return false
	}
}

// transformDataset takes a sample and merges it into the dataset.
// The dataset is organized by experiment, then by measurement, e.g.:
//
//	{ "50-gitrepo": {
//	    "CPU": { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	    "GC":  { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	  },
//	  "50-bundle": {
//	    "CPU": { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	    "GC":  { "mean": 0.5, "stddev": 0.1, values: [0.4, 0.5, 0.6] },
//	  },
//	}
func transformDataset(ds Dataset, sample parser.Sample) {
	for name, experiment := range sample.Experiments {
		byMeasurement, ok := ds[name]
		if !ok {
			byMeasurement = map[string]Measurements{}
			ds[name] = byMeasurement
		}

		for measurement, value := range experiment.Measurements {
			m := byMeasurement[measurement]
			m.Values = append(m.Values, value.Value)
			byMeasurement[measurement] = m
		}
	}
}

// calculate computes the mean and stddev of every measurement in the
// population, and the z-score of the given sample against that
// population. It mutates dsPop (Mean, StdDev, ZScore) and scores
// (ZScores, Weights, MeanZScore).
//
// Skipped measurements (see skip), measurements missing from the
// sample, and measurements whose stddev is zero or undefined are
// ignored.
func calculate(sample *parser.Sample, dsPop Dataset, scores scoresByXP) {
	// For each experiment in the population, derive per-measurement
	// statistics and the sample's z-score.
	for experiment, measurements := range dsPop {
		sampleXP, ok := sample.Experiments[experiment]
		if !ok {
			// The sample does not contain this experiment.
			continue
		}

		for measurement, sg := range measurements {
			if skip(measurement) {
				continue
			}

			m, ok := sampleXP.Measurements[measurement]
			if !ok {
				// The sample is missing this measurement.
				continue
			}

			mean, stddev := stat.MeanStdDev(sg.Values, nil)
			// A zero or undefined stddev would make the z-score
			// meaningless (division by zero).
			if math.IsNaN(stddev) || stddev == 0 {
				continue
			}

			// z-score: how many standard deviations the sample's
			// value is away from the population mean.
			zscore := stat.StdScore(m.Value, mean, stddev)

			// Store the statistics back into the population dataset.
			sg.Mean = mean
			sg.StdDev = stddev
			sg.ZScore = zscore
			dsPop[experiment][measurement] = sg

			// Collect per-experiment z-scores for the summary below.
			// Note: named sc, not xp, to avoid shadowing outer loop
			// variables.
			sc := scores[experiment]
			sc.ZScores = append(sc.ZScores, zscore)
			sc.Weights = append(sc.Weights, weight(measurement))
			scores[experiment] = sc
		}
	}

	// Summarize each experiment with the weighted mean of its z-scores.
	for name, sc := range scores {
		sc.MeanZScore = stat.Mean(sc.ZScores, sc.Weights)
		scores[name] = sc
	}
}

// weight returns the aggregation weight for a measurement. Some
// measurements have a higher volatility than others, or are duplicated.
// Only TotalDuration contributes, as it is shown in the result table.
//
// Example measurement values for reference:
//
//	"CPU": 14.029999999999973,
//	"GCDuration": 1.9185229570000004,
//	"Mem": 4,
//	"MemDuring": 4,
//	"NetworkRX": 68288672,
//	"NetworkTX": 30662826,
//	"ReconcileErrors": 0,
//	"ReconcileRequeue": 65,
//	"ReconcileRequeueAfter": 462,
//	"ReconcileSuccess": 2329,
//	"ReconcileTime": 8153.420151420956,
//	"ResourceCount": 300,
//	"WorkqueueAdds": 2844,
//	"WorkqueueQueueDuration": 3911.157310051014,
//	"WorkqueueRetries": 527,
//	"WorkqueueWorkDuration": 8169.425508522996
func weight(name string) float64 {
	switch name {
	case "TotalDuration":
		return 1.0
	default:
		return 0.0
	}
}
102 changes: 102 additions & 0 deletions benchmarks/cmd/db.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
package main

import (
"encoding/json"
"fmt"
"os"
"path/filepath"

"github.com/rancher/fleet/benchmarks/cmd/parser"

gm "github.com/onsi/gomega/gmeasure"

"github.com/onsi/ginkgo/v2/types"
)

// Population has data from all json reports, one Sample per report file
// loaded from the db folder.
type Population struct {
	Samples []parser.Sample
}

// loadDB loads all the json files from the db folder and parses them.
// The json files are generated by ginkgo --json-report.
//
// To inspect them manually use jq:
//
//	jq '.[0].SpecReports.[].State' < b-2024-11-11_19:15:08.json
//	jq '.[0].SpecReports.[].ReportEntries' < b-2024-11-12_15:25:33.json
//	jq '.[0].SpecReports.[].ReportEntries.[0].Name' < b-2024-11-12_15:25:33.json
func loadDB(db string) (*Population, error) {
	files, err := filepath.Glob(db + "/*.json")
	if err != nil {
		return nil, err
	}

	pop := &Population{}

	for _, file := range files {
		s, err := loadSampleFile(file)
		if err != nil {
			return nil, err
		}
		if s == nil {
			// Empty report or spec did not pass; skip it.
			continue
		}
		pop.Samples = append(pop.Samples, *s)
	}

	return pop, nil
}

// loadSampleFile loads a single ginkgo json report file.
// This loads any measurements from ginkgo measurement, but it has special
// handling for MemDuring and measurements that use the Before/After suffixes.
//
// To generate such a report json run for example:
//
//	ginkgo run --json-report out.json ./benchmarks
//
// It returns (nil, nil) for reports that are empty or whose spec did
// not pass.
func loadSampleFile(file string) (*parser.Sample, error) {
	data, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}

	var reports []types.Report
	if err := json.Unmarshal(data, &reports); err != nil {
		fmt.Printf("error: %s\n", data)
		return nil, err
	}

	// Ignore empty reports and reports without spec results.
	if len(reports) < 1 {
		return nil, nil
	}
	r := reports[0]
	if len(r.SpecReports) < 1 {
		return nil, nil
	}
	// Only passed runs contribute to the population.
	if r.SpecReports[0].State != types.SpecStatePassed {
		return nil, nil
	}

	s := parser.Sample{
		Experiments: map[string]parser.Experiment{},
		Setup:       map[string]parser.Measurement{},
	}

	d, err := parser.NewSetup(r.SpecReports, s.Setup)
	if err != nil {
		return nil, err
	}
	s.Description = d

	total, err := parser.NewExperiments(r.SpecReports, s.Experiments)
	if err != nil {
		return nil, err
	}

	// Record the overall duration as a setup measurement so it shows up
	// alongside the other setup data.
	s.Setup["TotalDuration"] = parser.Measurement{
		Type:            gm.MeasurementTypeDuration,
		Style:           "{{bold}}",
		PrecisionBundle: gm.DefaultPrecisionBundle,
		Value:           total,
	}

	return &s, nil
}
Loading

0 comments on commit 2cec144

Please sign in to comment.