13 changes: 13 additions & 0 deletions .github/actions/systemtests/generate-matrix/pipelines.yaml
@@ -392,3 +392,16 @@ pipelines:
strimzi_rbac_scope: "CLUSTER"
cluster_operator_install_type: "yaml"
parallel: 4
###################
### Performance ###
###################
# x86-64
- agent: "oracle-vm-8cpu-32gb-x86-64"
arch: "amd64"
pipeline: "performance"
profile: "performance"
timeout: 180
strimzi_feature_gates: ""
strimzi_rbac_scope: "CLUSTER"
cluster_operator_install_type: "yaml"
parallel: 1
227 changes: 227 additions & 0 deletions .github/actions/systemtests/perf-report/action.yml
@@ -0,0 +1,227 @@
name: "Performance Report Generator"
description: "Parses performance test results and generates a markdown summary for GitHub Actions"

inputs:
performance-dir:
description: "Path to the performance results directory"
required: false
default: "systemtest/target/performance"

outputs:
summary:
description: "Markdown summary of performance results"
value: ${{ steps.generate_report.outputs.summary }}
has-results:
description: "Whether performance results were found"
value: ${{ steps.generate_report.outputs.has_results }}
timestamp:
description: "Timestamp of the performance test run"
value: ${{ steps.generate_report.outputs.timestamp }}

runs:
using: "composite"
steps:
- name: Generate Performance Report
id: generate_report
uses: actions/github-script@v7
env:
PERF_DIR: ${{ inputs.performance-dir }}
with:
script: |
const fs = require('fs');
const path = require('path');

const perfDir = process.env.PERF_DIR || 'systemtest/target/performance';

/**
* Find the latest timestamped results directory
*/
function findLatestResultsDir(baseDir) {
if (!fs.existsSync(baseDir)) {
core.warning(`Performance directory not found: ${baseDir}`);
return null;
}

const entries = fs.readdirSync(baseDir, { withFileTypes: true });
const timestampDirs = entries
.filter(entry => entry.isDirectory())
.map(entry => entry.name)
.sort()
.reverse();

if (timestampDirs.length === 0) {
core.warning(`No timestamp directories found in ${baseDir}`);
return null;
}

return path.join(baseDir, timestampDirs[0]);
}

/**
* Parse a results-table.txt file
*/
function parseResultsTable(tableFile) {
if (!fs.existsSync(tableFile)) {
return null;
}

const content = fs.readFileSync(tableFile, 'utf8');
const lines = content.trim().split('\n');

if (lines.length === 0) {
return null;
}

// Extract use case name from first line
const useCase = lines[0].replace('Use Case: ', '').trim();

// Find data lines (lines starting with |, excluding separator lines with +)
const dataLines = lines.filter(line => line.startsWith('|') && !line.startsWith('+'));

if (dataLines.length < 2) {
return { useCase, experiments: [], rawTable: content };
}

// Parse header
const header = dataLines[0].split('|')
.slice(1, -1)
.map(col => col.trim());

// Parse experiment rows
const experiments = [];
for (let i = 1; i < dataLines.length; i++) {
const values = dataLines[i].split('|')
.slice(1, -1)
.map(col => col.trim());

if (values.length === header.length) {
const experiment = {};
header.forEach((h, idx) => {
experiment[h] = values[idx];
});
experiments.push(experiment);
}
}

return {
useCase,
header,
experiments,
rawTable: content
};
}

/**
* Parse results for a specific operator
*/
function parseOperatorResults(operatorDir) {
if (!fs.existsSync(operatorDir)) {
return null;
}

const resultsTablePath = path.join(operatorDir, 'results-table.txt');
const operatorName = path.basename(operatorDir);

return {
operator: operatorName,
resultsTable: parseResultsTable(resultsTablePath)
};
}

/**
* Format a timestamp taken from the directory name (yyyy-MM-dd-HH-mm-ss) into a readable format
*/
function formatTimestamp(timestamp) {
// Parse format: 2025-11-07-17-39-26
const parts = timestamp.split('-');
if (parts.length === 6) {
const [year, month, day, hour, minute] = parts;
return `${year}-${month}-${day} ${hour}:${minute}`;
}
return timestamp;
}

/**
* Generate markdown summary
*/
function generateMarkdownSummary(results) {
const lines = [
`**Test Run:** \`${formatTimestamp(results.timestamp)}\``,
''
];

for (const [operatorName, operatorData] of Object.entries(results.operators)) {
if (!operatorData || !operatorData.resultsTable) {
continue;
}

const title = operatorName.replace(/-/g, ' ').replace(/\b\w/g, c => c.toUpperCase());
lines.push(`## ${title}`);
lines.push('');

const tableData = operatorData.resultsTable;

if (!tableData.experiments || tableData.experiments.length === 0) {
lines.push('_No results available_');
lines.push('');
continue;
}

// Add the raw table (already contains use case header)
lines.push(tableData.rawTable);
lines.push('');
}

lines.push('---');
lines.push('_Performance results generated automatically_');

return lines.join('\n');
}

// Main execution
try {
const latestDir = findLatestResultsDir(perfDir);

if (!latestDir) {
core.setOutput('has_results', 'false');
core.setOutput('summary', '_No performance results found_');
core.setOutput('timestamp', '');
return;
}

const timestamp = path.basename(latestDir);
core.info(`Found performance results: ${timestamp}`);

const results = {
timestamp,
operators: {}
};

// Parse topic-operator results
const topicOpDir = path.join(latestDir, 'topic-operator');
const topicOpResults = parseOperatorResults(topicOpDir);
if (topicOpResults) {
results.operators['topic-operator'] = topicOpResults;
}

// Parse user-operator results
const userOpDir = path.join(latestDir, 'user-operator');
const userOpResults = parseOperatorResults(userOpDir);
if (userOpResults) {
results.operators['user-operator'] = userOpResults;
}

const summary = generateMarkdownSummary(results);

core.setOutput('has_results', 'true');
core.setOutput('summary', summary);
core.setOutput('timestamp', timestamp);

core.info('Performance report generated successfully');

} catch (error) {
core.error(`Error generating performance report: ${error.message}`);
core.setOutput('has_results', 'false');
core.setOutput('summary', `_Error generating performance report: ${error.message}_`);
core.setOutput('timestamp', '');
}
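
For context, here is a hypothetical `results-table.txt` in the layout this parser expects, placed under a timestamped directory such as `systemtest/target/performance/2025-11-07-17-39-26/topic-operator/`; the use case name, columns, and values below are illustrative, not taken from a real run:

```
Use Case: bulk-topic-creation
+------------+------------------+---------------------+
| Experiment | Number of Topics | Reconciliation (ms) |
+------------+------------------+---------------------+
| 1          | 500              | 1234                |
| 2          | 1000             | 2875                |
+------------+------------------+---------------------+
```

Given such a file, the step would emit a summary starting with **Test Run:** `2025-11-07 17:39`, a ## Topic Operator section containing the raw table, and the _Performance results generated automatically_ footer.
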
7 changes: 7 additions & 0 deletions .github/workflows/run-system-tests.yml
@@ -283,6 +283,13 @@ jobs:
name: systemtest-logs-${{ matrix.config.pipeline }}-${{ matrix.config.profile }}-${{ matrix.config.agent }}
path: systemtest-logs

- name: Archive performance results
if: ${{ always() && matrix.config.profile == 'performance' }}
uses: actions/upload-artifact@v4
with:
name: performance-results-${{ matrix.config.pipeline }}-${{ matrix.config.profile }}-${{ matrix.config.agent }}
path: systemtest/target/performance

- name: Set check & commit status
if: ${{ always() }}
uses: ./.github/actions/utils/check-and-status
71 changes: 67 additions & 4 deletions .github/workflows/system-tests.yml
@@ -293,10 +293,10 @@ jobs:
- generate-matrix
- run-tests
if: |-
${{
always() &&
needs.parse-params.outputs.shouldRun == 'true' &&
needs.check-rights.result == 'success' &&
${{
always() &&
needs.parse-params.outputs.shouldRun == 'true' &&
needs.check-rights.result == 'success' &&
github.event_name != 'pull_request' &&
needs.generate-matrix.outputs.isValid == 'true'
}}
@@ -324,3 +324,66 @@ jobs:
checkState: ${{ needs.run-tests.result == 'skipped' && 'neutral' || needs.run-tests.result }}
checkName: "System tests verification"
checkDescription: "Check for overall system test verification"

perf-report:
name: Performance Report
needs:
- parse-params
- check-rights
- generate-matrix
- run-tests
# Only run if performance profile/pipeline was executed
if: |-
${{
always() &&
needs.parse-params.outputs.shouldRun == 'true' &&
needs.check-rights.result == 'success' &&
github.event_name != 'pull_request' &&
needs.generate-matrix.outputs.isValid == 'true' &&
(contains(needs.parse-params.outputs.profileList, 'performance') || contains(needs.parse-params.outputs.pipelineList, 'performance'))
}}
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- uses: actions/checkout@v5
with:
ref: ${{ needs.parse-params.outputs.ref }}

- name: Download performance results
uses: actions/download-artifact@v4
with:
pattern: performance-results-*
path: systemtest/target/performance-artifacts
merge-multiple: true

- name: List downloaded artifacts
run: |
echo "Downloaded performance artifacts:"
find systemtest/target/performance-artifacts -type f | head -20

- name: Generate performance report
id: generate_report
uses: ./.github/actions/systemtests/perf-report
with:
performance-dir: systemtest/target/performance-artifacts

- name: Add performance report comment
if: ${{ steps.generate_report.outputs.has-results == 'true' }}
uses: ./.github/actions/utils/add-comment
with:
commentMessage: |
## Performance Test Results

${{ steps.generate_report.outputs.summary }}

- name: Add performance report to job summary
if: ${{ steps.generate_report.outputs.has-results == 'true' }}
run: |
echo "${{ steps.generate_report.outputs.summary }}" >> "$GITHUB_STEP_SUMMARY"

- name: No results warning
if: ${{ steps.generate_report.outputs.has-results == 'false' }}
run: |
echo "::warning::No performance results found in artifacts"
@@ -23,7 +23,7 @@
| Step | Action | Result |
| - | - | - |
| 1. | Deploy Kafka cluster with User Operator configured with more resources to handle load and also non-default `STRIMZI_WORK_QUEUE_SIZE` set to 2048. | Kafka cluster with User Operator is deployed and ready. |
| 2. | For each configured load level (1000 existing users), create N KafkaUsers to establish the load. | N KafkaUsers are created and ready, establishing baseline load on the User Operator. |
| 2. | For each configured load level (1000, 1500, 2000 existing users), create N KafkaUsers to establish the load. | N KafkaUsers are created and ready, establishing baseline load on the User Operator. |
| 3. | Perform 100 individual user modifications sequentially, measuring the latency of each modification. | Each modification latency is recorded independently. |
| 4. | Calculate latency statistics: min, max, average, P50, P95, and P99 percentiles from the 100 measurements. | Statistical analysis shows how single-user modification latency degrades as system load (number of existing users) increases. |
| 5. | Clean up all users and persist latency metrics to user-operator report directory. | Namespace is cleaned, latency data is saved showing how responsiveness changes at different load levels. |
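
As a complement to step 4 above, a minimal Java sketch of the described statistics (nearest-rank percentiles over the recorded latencies); the class and values are illustrative, not the actual Strimzi test code:

```java
import java.util.Arrays;

// Illustrative sketch of the step-4 statistics: min, max, average, and
// nearest-rank P50/P95/P99 over the per-modification latencies.
final class LatencyStatsSketch {
    static double percentile(long[] sortedLatenciesMs, double p) {
        // nearest-rank percentile on an already sorted array
        int index = (int) Math.ceil((p / 100.0) * sortedLatenciesMs.length) - 1;
        return sortedLatenciesMs[Math.max(index, 0)];
    }

    public static void main(String[] args) {
        long[] latenciesMs = {120, 95, 150, 110, 130}; // stand-in for the 100 measurements
        long[] sorted = latenciesMs.clone();
        Arrays.sort(sorted);

        double avg = Arrays.stream(sorted).average().orElse(0);
        System.out.printf("min=%d max=%d avg=%.1f p50=%.0f p95=%.0f p99=%.0f%n",
                sorted[0], sorted[sorted.length - 1], avg,
                percentile(sorted, 50), percentile(sorted, 95), percentile(sorted, 99));
    }
}
```
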
@@ -6,6 +6,7 @@

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import io.strimzi.systemtest.TestTags;
import io.strimzi.systemtest.performance.TimeHolder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.extension.ExtensionContext;
@@ -32,6 +33,7 @@ public void testPlanExecutionStarted(TestPlan plan) {
LOGGER.info("=======================================================================");
testPlan = plan;
printSelectedTestClasses(testPlan);
TimeHolder.resetTimestamp();
}

public void testPlanExecutionFinished(TestPlan testPlan) {
@@ -0,0 +1,30 @@
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.performance;

import java.time.LocalDateTime;
import java.time.temporal.TemporalAccessor;

/**
* Shared timestamp for all performance tests, ensuring unified logging across systemtest/target/performance/*.
* The timestamp is reset at the start of each test run (including Maven reruns) so each run gets a unique timestamp,
* but all tests within that run share the same timestamp.
*/
public final class TimeHolder {
private TimeHolder() {}

private static TemporalAccessor actualTime = LocalDateTime.now();

public static TemporalAccessor getActualTime() {
return actualTime;
}

/**
* Resets the timestamp to the current time. Called by the test execution listener at the start of each test run.
*/
public static void resetTimestamp() {
actualTime = LocalDateTime.now();
}
}
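
A minimal usage sketch showing how the shared timestamp yields one results directory per run; the reporter class, constant, and path handling here are assumptions, only TimeHolder and the yyyy-MM-dd-HH-mm-ss directory layout come from this change:

```java
import io.strimzi.systemtest.performance.TimeHolder;

import java.time.format.DateTimeFormatter;

public class PerformanceDirSketch {
    // Matches the directory-name format parsed by the perf-report action.
    private static final DateTimeFormatter DIR_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss");

    public static void main(String[] args) {
        // Every test in the same run resolves to the same directory, because
        // TimeHolder is reset once, when test plan execution starts.
        String runDir = "systemtest/target/performance/" + DIR_FORMAT.format(TimeHolder.getActualTime());
        System.out.println(runDir); // e.g. systemtest/target/performance/2025-11-07-17-39-26
    }
}
```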