diff --git a/README.rst b/README.rst index 1eb46d0e..7274124a 100644 --- a/README.rst +++ b/README.rst @@ -1230,6 +1230,14 @@ To have an output in json format: This will output an expanded (meaning scenario outlines will be expanded to several scenarios) Cucumber format. +A similar report is the junit format. It follows the `Jenkins JUnit Schema `_. + +To have an output in junit format: + +:: + + pytest --cucumberjunit=<path to junit report> + To enable gherkin-formatted output on terminal, use `--gherkin-terminal-reporter` in conjunction with the `-v` or `-vv` options: :: diff --git a/src/pytest_bdd/plugin.py b/src/pytest_bdd/plugin.py index 2c06f71b..a52cf2bc 100644 --- a/src/pytest_bdd/plugin.py +++ b/src/pytest_bdd/plugin.py @@ -8,7 +8,8 @@ import pytest from typing_extensions import ParamSpec -from . import cucumber_json, generation, gherkin_terminal_reporter, given, reporting, then, when +from . import generation, given, then, when +from .reports import cucumber_json, cucumber_junit, gherkin_terminal_reporter, reporting from .utils import CONFIG_STACK if TYPE_CHECKING: @@ -59,6 +60,7 @@ def pytest_addoption(parser: Parser) -> None: """Add pytest-bdd options.""" add_bdd_ini(parser) cucumber_json.add_options(parser) + cucumber_junit.add_options(parser) generation.add_options(parser) gherkin_terminal_reporter.add_options(parser) @@ -72,6 +74,7 @@ def pytest_configure(config: Config) -> None: """Configure all subplugins.""" CONFIG_STACK.append(config) cucumber_json.configure(config) + cucumber_junit.configure(config) gherkin_terminal_reporter.configure(config) @@ -80,6 +83,7 @@ def pytest_unconfigure(config: Config) -> None: if CONFIG_STACK: CONFIG_STACK.pop() cucumber_json.unconfigure(config) + cucumber_junit.unconfigure(config) @pytest.hookimpl(hookwrapper=True) diff --git a/src/pytest_bdd/reports/__init__.py b/src/pytest_bdd/reports/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/pytest_bdd/cucumber_json.py b/src/pytest_bdd/reports/cucumber_json.py similarity index 95% rename from src/pytest_bdd/cucumber_json.py rename to src/pytest_bdd/reports/cucumber_json.py index 1a072573..79d11af1 100644 --- a/src/pytest_bdd/cucumber_json.py +++ b/src/pytest_bdd/reports/cucumber_json.py @@ -62,12 +62,13 @@ def _get_result(self, step: dict[str, Any], report: TestReport, error_message: b :return: `dict` in form {"status": "", ["error_message": ""]} """ result: dict[str, Any] = {} - if report.passed or not step["failed"]: # ignore setup/teardown + if report.skipped: + reason = report.longrepr[2][report.longrepr[2].find(":") + 2 :] + result = {"status": "skipped", "skipped_message": reason} + elif report.passed or not step["failed"]: # ignore setup/teardown result = {"status": "passed"} elif report.failed: result = {"status": "failed", "error_message": str(report.longrepr) if error_message else ""} - elif report.skipped: - result = {"status": "skipped"} result["duration"] = int(math.floor((10**9) * step["duration"])) # nanosec return result diff --git a/src/pytest_bdd/reports/cucumber_junit.py b/src/pytest_bdd/reports/cucumber_junit.py new file mode 100644 index 00000000..ca8ce678 --- /dev/null +++ b/src/pytest_bdd/reports/cucumber_junit.py @@ -0,0 +1,131 @@ +"""Cucumber junit output formatter.""" + +from __future__ import annotations + +import typing +import xml.dom.minidom + +from .cucumber_json import LogBDDCucumberJSON + +if typing.TYPE_CHECKING: + from _pytest.config import Config + from _pytest.config.argparsing import Parser + from _pytest.terminal import TerminalReporter + 
+SYSTEM_OUT_DEFAULT_MESSAGE_LENGTH = 61 +SYSTEM_OUT_MINIMUM_DOTS = 5 +SYSTEM_OUT_INITIAL_INDENT = " " + + +def add_options(parser: Parser) -> None: + """Add pytest-bdd options.""" + group = parser.getgroup("bdd", "Cucumber JUnit") + group.addoption( + "--cucumberjunit", + "--cucumber-junit", + action="store", + dest="cucumber_junit_path", + metavar="path", + default=None, + help="create cucumber junit style report file at given path.", + ) + + +def configure(config: Config) -> None: + cucumber_junit_path = config.option.cucumber_junit_path + # prevent opening junit log on worker nodes (xdist) + if cucumber_junit_path and not hasattr(config, "workerinput"): + config._bddcucumberjunit = LogBDDCucumberJUNIT(cucumber_junit_path) # type: ignore[attr-defined] + config.pluginmanager.register(config._bddcucumberjunit) # type: ignore[attr-defined] + + +def unconfigure(config: Config) -> None: + xml = getattr(config, "_bddcucumberjunit", None) # type: ignore[attr-defined] + if xml is not None: + del config._bddcucumberjunit # type: ignore[attr-defined] + config.pluginmanager.unregister(xml) + + +class LogBDDCucumberJUNIT(LogBDDCucumberJSON): + """Logging plugin for cucumber-like JUnit output.""" + + def _join_and_pad(self, str1: str, str2: str, total_length: int = SYSTEM_OUT_DEFAULT_MESSAGE_LENGTH) -> str: + remaining = total_length - len(str1) - len(str2) - SYSTEM_OUT_MINIMUM_DOTS + + if remaining >= 0: + return SYSTEM_OUT_INITIAL_INDENT + str1 + "." * (remaining + SYSTEM_OUT_MINIMUM_DOTS) + str2 + else: + return self._join_and_pad(str1[:remaining], str2, total_length) + + def _generate_xml_report(self) -> xml.dom.minidom.Document: + document = xml.dom.minidom.Document() + + root = document.createElement("testsuite") + root.setAttribute("name", "pytest-bdd.cucumber.junit") + no_of_tests = 0 + no_of_skipped = 0 + no_of_failures = 0 + no_of_errors = 0 + scenario_time = 0 + + for feature in self.features.values(): + for test_case in feature["elements"]: + no_of_tests += 1 + test_case_doc = document.createElement("testcase") + test_case_doc.setAttribute("classname", feature["name"]) + if test_case["keyword"] == "Scenario Outline": + params = test_case["id"][test_case["id"].find("[") + 1 : -1] + name = f'{test_case["name"]} - ({params})' + else: + name = test_case["name"] + + test_case_doc.setAttribute("name", name) + + failure = False + skipped = False + failure_doc = document.createElement("failure") + skipped_doc = document.createElement("skipped") + case_time = 0 + + text = "\n" + for step in test_case["steps"]: + text += self._join_and_pad(f'{step["keyword"]} {step["name"]}', step["result"]["status"]) + "\n" + case_time += step["result"]["duration"] + if step["result"]["status"] == "failed": + failure = True + failure_doc.appendChild(document.createTextNode(step["result"]["error_message"])) + elif step["result"]["status"] == "skipped": + skipped = True + skipped_doc.appendChild(document.createTextNode(step["result"]["skipped_message"])) + test_case_doc.setAttribute("time", str(case_time)) + if failure: + test_case_doc.appendChild(failure_doc) + no_of_failures += 1 + no_of_tests -= 1 + elif skipped: + test_case_doc.appendChild(skipped_doc) + no_of_skipped += 1 + no_of_tests -= 1 + + system_out = document.createElement("system-out") + system_out.appendChild(document.createTextNode(text + "\n")) + test_case_doc.appendChild(system_out) + root.appendChild(test_case_doc) + scenario_time += case_time + + root.setAttribute("tests", str(no_of_tests)) + root.setAttribute("skipped", str(no_of_skipped)) + root.setAttribute("failures", 
str(no_of_failures)) + root.setAttribute("errors", str(no_of_errors)) + root.setAttribute("time", str(scenario_time)) + + document.appendChild(root) + return document + + def pytest_sessionfinish(self) -> None: + document = self._generate_xml_report() + with open(self.logfile, "w", encoding="utf-8") as logfile: + document.writexml(logfile, indent=" ", addindent=" ", newl="\n", encoding="utf-8") + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated junit file: {self.logfile}") diff --git a/src/pytest_bdd/gherkin_terminal_reporter.py b/src/pytest_bdd/reports/gherkin_terminal_reporter.py similarity index 100% rename from src/pytest_bdd/gherkin_terminal_reporter.py rename to src/pytest_bdd/reports/gherkin_terminal_reporter.py diff --git a/src/pytest_bdd/reporting.py b/src/pytest_bdd/reports/reporting.py similarity index 99% rename from src/pytest_bdd/reporting.py rename to src/pytest_bdd/reports/reporting.py index 2c4a4a2e..e9c1ddf6 100644 --- a/src/pytest_bdd/reporting.py +++ b/src/pytest_bdd/reports/reporting.py @@ -17,7 +17,7 @@ from _pytest.reports import TestReport from _pytest.runner import CallInfo - from .parser import Feature, Scenario, Step + from ..parser import Feature, Scenario, Step class StepReport: diff --git a/tests/feature/reports/__init__.py b/tests/feature/reports/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/feature/reports/cucumber_helper.py b/tests/feature/reports/cucumber_helper.py new file mode 100644 index 00000000..48531f7d --- /dev/null +++ b/tests/feature/reports/cucumber_helper.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import textwrap + + +class OfType: + """Helper object to help compare object type to initialization type""" + + def __init__(self, type: type | None = None) -> None: + self.type = type + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.type) if self.type else True + + +def create_test(pytester): + pytester.makefile( + ".ini", + pytest=textwrap.dedent( + """ + [pytest] + markers = + scenario-passing-tag + scenario-failing-tag + scenario-outline-passing-tag + feature-tag + """ + ), + ) + pytester.makefile( + ".feature", + test=textwrap.dedent( + """ + @feature-tag + Feature: One passing scenario, one failing scenario + + @scenario-passing-tag + Scenario: Passing + Given a passing step + And some other passing step + + @scenario-failing-tag + Scenario: Failing + Given a passing step + And a failing step + + @scenario-outline-passing-tag + Scenario Outline: Passing outline + Given type <type> and value <value> + + Examples: example1 + | type | value | + | str | hello | + | int | 42 | + | float | 1.0 | + + Scenario: Skipping test + Given a skipping step + """ + ), + ) + pytester.makepyfile( + textwrap.dedent( + """ + import pytest + from pytest_bdd import given, when, scenario, parsers + + @given('a passing step') + def _(): + return 'pass' + + @given('some other passing step') + def _(): + return 'pass' + + @given('a failing step') + def _(): + raise Exception('Error') + + @given('a skipping step') + def _(): + pytest.skip('skipping') + + @given(parsers.parse('type {type} and value {value}')) + def _(): + return 'pass' + + @scenario('test.feature', 'Passing') + def test_passing(): + pass + + @scenario('test.feature', 'Failing') + def test_failing(): + pass + + @scenario('test.feature', 'Passing outline') + def test_passing_outline(): + pass + + @scenario('test.feature', 'Skipping test') + def test_skipping(): + pass + """ + ) + ) diff --git 
a/tests/feature/test_cucumber_json.py b/tests/feature/reports/test_cucumber_json.py similarity index 66% rename from tests/feature/test_cucumber_json.py rename to tests/feature/reports/test_cucumber_json.py index d3897b77..468bf2f5 100644 --- a/tests/feature/test_cucumber_json.py +++ b/tests/feature/reports/test_cucumber_json.py @@ -4,114 +4,27 @@ import json import os.path -import textwrap -from typing import TYPE_CHECKING, Any +from typing import Any -if TYPE_CHECKING: - from _pytest.pytester import Pytester, RunResult +from _pytest.pytester import Pytester, RunResult +from .cucumber_helper import OfType, create_test -def runandparse(pytester: Pytester, *args: Any) -> tuple[RunResult, list[dict[str, Any]]]: - """Run tests in testdir and parse json output.""" - resultpath = pytester.path.joinpath("cucumber.json") - result = pytester.runpytest(f"--cucumberjson={resultpath}", "-s", *args) - with resultpath.open() as f: + +def run_and_parse(pytester: Pytester, *args: Any) -> tuple[RunResult, list[dict[str, Any]]]: + """Run tests in test-dir and parse json output.""" + result_path = pytester.path.joinpath("cucumber.json") + result = pytester.runpytest(f"--cucumberjson={result_path}", "-s", *args) + with result_path.open() as f: jsonobject = json.load(f) return result, jsonobject -class OfType: - """Helper object to help compare object type to initialization type""" - - def __init__(self, type: type | None = None) -> None: - self.type = type - - def __eq__(self, other: object) -> bool: - return isinstance(other, self.type) if self.type else True - - def test_step_trace(pytester): """Test step trace.""" - pytester.makefile( - ".ini", - pytest=textwrap.dedent( - """ - [pytest] - markers = - scenario-passing-tag - scenario-failing-tag - scenario-outline-passing-tag - feature-tag - """ - ), - ) - pytester.makefile( - ".feature", - test=textwrap.dedent( - """ - @feature-tag - Feature: One passing scenario, one failing scenario - - @scenario-passing-tag - Scenario: Passing - Given a passing step - And some other passing step - - @scenario-failing-tag - Scenario: Failing - Given a passing step - And a failing step - - @scenario-outline-passing-tag - Scenario Outline: Passing outline - Given type <type> and value <value> - - Examples: example1 - | type | value | - | str | hello | - | int | 42 | - | float | 1.0 | - """ - ), - ) - pytester.makepyfile( - textwrap.dedent( - """ - import pytest - from pytest_bdd import given, when, scenario, parsers - - @given('a passing step') - def _(): - return 'pass' - - @given('some other passing step') - def _(): - return 'pass' - - @given('a failing step') - def _(): - raise Exception('Error') - - @given(parsers.parse('type {type} and value {value}')) - def _(): - return 'pass' - - @scenario('test.feature', 'Passing') - def test_passing(): - pass - - @scenario('test.feature', 'Failing') - def test_failing(): - pass - - @scenario('test.feature', 'Passing outline') - def test_passing_outline(): - pass - """ - ) - ) - result, jsonobject = runandparse(pytester) - result.assert_outcomes(passed=4, failed=1) + create_test(pytester) + result, jsonobject = run_and_parse(pytester) + result.assert_outcomes(passed=4, failed=1, skipped=1) assert result.ret expected = [ @@ -222,6 +135,24 @@ def test_passing_outline(): "id": "test_passing_outline[float-1.0]", "name": "Passing outline", }, + { + "description": "", + "id": "test_skipping", + "keyword": "Scenario", + "line": 24, + "name": "Skipping test", + "steps": [ + { + "keyword": "Given", + "line": 25, + "match": {"location": ""}, + "name": "a 
skipping step", + "result": {"skipped_message": "skipping", "status": "skipped", "duration": OfType(int)}, + } + ], + "tags": [], + "type": "scenario", + }, ], "id": os.path.join("test_step_trace0", "test.feature"), "keyword": "Feature", diff --git a/tests/feature/reports/test_cucumber_junit.py b/tests/feature/reports/test_cucumber_junit.py new file mode 100644 index 00000000..7d719b12 --- /dev/null +++ b/tests/feature/reports/test_cucumber_junit.py @@ -0,0 +1,75 @@ +"""Test cucumber junit output.""" + +from __future__ import annotations + +import xml.dom.minidom +from typing import Any + +from _pytest.pytester import Pytester, RunResult + +from .cucumber_helper import create_test + +FEATURE_NAME = "One passing scenario, one failing scenario" + + +def run_and_parse(pytester: Pytester, *args: Any) -> tuple[RunResult, xml.dom.minidom.Document]: + """Run tests in test-dir and parse xml output.""" + result_path = pytester.path.joinpath("cucumber.xml") + result = pytester.runpytest(f"--cucumberjunit={result_path}", "-s", *args) + with result_path.open() as f: + xmlobject = xml.dom.minidom.parseString(f.read()) + return result, xmlobject + + +def test_step_trace(pytester): + """Test step trace.""" + create_test(pytester) + result, xmlobject = run_and_parse(pytester) + result.assert_outcomes(passed=4, failed=1, skipped=1) + + assert result.ret + + test_suite = xmlobject.firstChild + assert test_suite.localName == "testsuite" + + test_suite_attributes = dict(test_suite.attributes.items()) + assert test_suite_attributes["name"] == "pytest-bdd.cucumber.junit" + assert test_suite_attributes["tests"] == "4" + assert test_suite_attributes["skipped"] == "1" + assert test_suite_attributes["errors"] == "0" + assert test_suite_attributes["failures"] == "1" + assert isinstance(float(test_suite_attributes["time"]), float) + + test_cases = [test_case for test_case in test_suite.childNodes if isinstance(test_case, xml.dom.minidom.Element)] + assert all(test_case.localName == "testcase" for test_case in test_cases) + assert all(test_case.attributes["classname"].value == FEATURE_NAME for test_case in test_cases) + assert all(isinstance(float(test_case.attributes["time"].value), float) for test_case in test_cases) + + assert test_cases[0].attributes["name"].value == "Passing" + assert test_cases[1].attributes["name"].value == "Failing" + assert test_cases[2].attributes["name"].value == "Passing outline - (str-hello)" + assert test_cases[3].attributes["name"].value == "Passing outline - (int-42)" + assert test_cases[4].attributes["name"].value == "Passing outline - (float-1.0)" + + [test_output] = [child for child in test_cases[0].childNodes if isinstance(child, xml.dom.minidom.Element)] + assert test_output.nodeName == "system-out" + assert test_output.firstChild.data == ( + "\n" + " Given a passing step...................................passed\n" + " And some other passing step............................passed\n" + "\n" + ) + + test_outputs = [child for child in test_cases[1].childNodes if isinstance(child, xml.dom.minidom.Element)] + assert test_outputs[0].nodeName == "failure" + + assert "Exception: Error" in test_outputs[0].firstChild.data + assert "test_step_trace.py:14: Exception" in test_outputs[0].firstChild.data + + assert test_outputs[1].nodeName == "system-out" + assert test_outputs[1].firstChild.data == ( + "\n" + " Given a passing step...................................passed\n" + " And a failing step.....................................failed\n" + "\n" + ) diff --git 
a/tests/feature/test_gherkin_terminal_reporter.py b/tests/feature/reports/test_gherkin_terminal_reporter.py similarity index 100% rename from tests/feature/test_gherkin_terminal_reporter.py rename to tests/feature/reports/test_gherkin_terminal_reporter.py diff --git a/tests/feature/test_report.py b/tests/feature/reports/test_report.py similarity index 100% rename from tests/feature/test_report.py rename to tests/feature/reports/test_report.py
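For reference, a minimal sketch of consuming a report produced by the new `--cucumberjunit` option. It assumes the option was pointed at a local `cucumber.xml` (an arbitrary example path, not a plugin default) and uses only the standard library; the element and attribute names (`testsuite`, `testcase`, `failure`, `skipped`, `system-out`, the suite name `pytest-bdd.cucumber.junit`) are the ones written by `LogBDDCucumberJUNIT._generate_xml_report` above:

::

    # Inspect a report generated with: pytest --cucumberjunit=cucumber.xml
    # "cucumber.xml" is an arbitrary example path, not a plugin default.
    import xml.dom.minidom

    with open("cucumber.xml", encoding="utf-8") as f:
        suite = xml.dom.minidom.parseString(f.read()).documentElement  # the single <testsuite> root

    print(suite.getAttribute("name"))  # "pytest-bdd.cucumber.junit"
    print(suite.getAttribute("tests"), suite.getAttribute("failures"), suite.getAttribute("skipped"))

    for case in suite.getElementsByTagName("testcase"):
        # A non-passing test case carries a <failure> or <skipped> child; every case
        # has a <system-out> child with the dot-padded "step ... status" lines.
        if case.getElementsByTagName("failure"):
            status = "failed"
        elif case.getElementsByTagName("skipped"):
            status = "skipped"
        else:
            status = "passed"
        print(f'{case.getAttribute("classname")} :: {case.getAttribute("name")} -> {status}')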