Add support for reporting subtests
When the `pytest-subtests` plugin is used, this plugin could only report the
last report of a test, which always passes (assuming no setup/teardown issues
and no failure outside the subtest context), so subtest failures were lost
and neither counted nor reported.
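
For illustration, a minimal test of this kind (it mirrors the test added to
`tests/test_elk_reporter.py` below and relies on the `subtests` fixture from
`pytest-subtests`): the subtests fail or pass individually, but the enclosing
test's own report still finishes as PASSED, so a reporter that keeps only the
last report never sees the subtest failures.

```python
# Sketch of the failure mode; mirrors the test added below.
def test_failing_subtests(subtests):
    with subtests.test("failed subtest"):
        raise Exception("should fail")  # shown by pytest-subtests as SUBFAIL
    with subtests.test("success subtest"):
        pass                            # shown as SUBPASS
    # the enclosing test itself still ends as PASSED
```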

This change fixes that by collecting all of the reports produced during a test
and reporting each one on its own.
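
A minimal standalone sketch of the caching idea (not the plugin's actual class;
the real change in `pytest_elk_reporter.py` is below): keep a list of
`(report, outcome)` pairs per test node instead of a single entry, so later
reports no longer overwrite earlier ones.

```python
from collections import defaultdict

# (nodeid, slavenode) -> [(report, outcome), ...]
reports = defaultdict(list)

def cache_report(nodeid, slavenode, report, outcome):
    # append instead of assign, so per-subtest reports are all kept
    reports[nodeid, slavenode].append((report, outcome))

def get_reports(nodeid, slavenode):
    # empty list when nothing was cached for this node
    return reports.get((nodeid, slavenode), [])
```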

It also introduces a new field in the test report output, `subtest`: for a
report that comes from a subtest, this field carries that subtest's message.
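
For example, a document reported for a failing subtest would look roughly like
this (illustrative values; `subtest` is present only on reports that come from
a subtest):

```python
# Rough shape of one reported document after this change (illustrative values).
example_doc = {
    "name": "test_subtests.py::test_failing_subtests",
    "outcome": "failure",
    "subtest": "failed subtest",  # the message passed to subtests.test(...)
    # ...plus the usual session fields (username, hostname, markers, ...)
}
```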

Fixes: #28
fruch committed Jul 25, 2024
1 parent 6ee3e1b commit e3d4efa
Showing 3 changed files with 71 additions and 16 deletions.
30 changes: 17 additions & 13 deletions pytest_elk_reporter.py
@@ -200,7 +200,7 @@ def __init__(self, config):
         self.session_data["username"] = get_username()
         self.session_data["hostname"] = socket.gethostname()
         self.test_data = defaultdict(dict)
-        self.reports = {}
+        self.reports = defaultdict(list)
         self.config = config
         self.is_slave = False

@@ -225,13 +225,13 @@ def cache_report(self, report_item, outcome):
         nodeid = getattr(report_item, "nodeid", report_item)
         # local hack to handle xdist report order
         slavenode = getattr(report_item, "node", None)
-        self.reports[nodeid, slavenode] = (report_item, outcome)
+        self.reports[nodeid, slavenode].append((report_item, outcome))

-    def get_report(self, report_item):
+    def get_reports(self, report_item):
         nodeid = getattr(report_item, "nodeid", report_item)
         # local hack to handle xdist report order
         slavenode = getattr(report_item, "node", None)
-        return self.reports.get((nodeid, slavenode), None)
+        return self.reports.get((nodeid, slavenode), [])

     @staticmethod
     def get_failure_messge(item_report):
@@ -281,15 +281,16 @@ def pytest_runtest_logreport(self, report):
         if report.when == "teardown":
             # in xdist, report only on worker nodes
             if self.get_worker_id() != "master":
-                old_report = self.get_report(report)
-                if report.passed and old_report:
-                    self.report_test(old_report[0], old_report[1])
-                if report.failed and old_report:
-                    self.report_test(
-                        report, old_report[1] + " & error", old_report=old_report[0]
-                    )
-                if report.skipped:
-                    self.report_test(report, "skipped")
+                old_reports = self.get_reports(report)
+                for old_report in old_reports:
+                    if report.passed and old_report:
+                        self.report_test(old_report[0], old_report[1])
+                    if report.failed and old_report:
+                        self.report_test(
+                            report, old_report[1] + " & error", old_report=old_report[0]
+                        )
+                    if report.skipped:
+                        self.report_test(report, "skipped")

     def report_test(self, item_report, outcome, old_report=None):
         self.stats[outcome] += 1
@@ -302,6 +303,9 @@ def report_test(self, item_report, outcome, old_report=None):
             markers=item_report.keywords,
             **self.session_data,
         )
+        context = getattr(item_report, "context", None)
+        if context:
+            test_data.update(subtest=context.msg)
         test_data.update(self.test_data[item_report.nodeid])
         del self.test_data[item_report.nodeid]

1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -7,4 +7,5 @@ pytest-cov
 tox
 wheel>=0.33.0
 pytest-xdist
+pytest-subtests
 six
56 changes: 53 additions & 3 deletions tests/test_elk_reporter.py
@@ -67,6 +67,12 @@ def fin():
         def test_skip_in_teardown(skip_in_teardown):
             pass
+
+        def test_failing_subtests(subtests):
+            with subtests.test("failed subtest"):
+                raise Exception("should fail")
+            with subtests.test("succcess subtest"):
+                pass
         """
     )

@@ -93,12 +99,12 @@ def test_skip_in_teardown(skip_in_teardown):
     last_report = json.loads(requests_mock.request_history[-1].text)
     assert last_report["stats"] == {
         "error": 1,
-        "failure": 2,
+        "failure": 3,
         "failure & error": 1,
-        "passed": 0,
+        "passed": 2,
         "skipped & error": 1,
         "passed & error": 1,
-        "skipped": 3,
+        "skipped": 4,
         "xfailed": 1,
         "xpass": 1,
         "error & error": 0,
@@ -531,3 +537,47 @@ def test_1(elk_reporter):
     assert (
         not requests_mock.called
     ), "Requests are not made to Elasticsearch when es_post_reports is False"
+
+
+def test_subtests(testdir, requests_mock):  # pylint: disable=redefined-outer-name
+    """Make sure subtests are identified and reported."""
+
+    # create a temporary pytest test module
+    testdir.makepyfile(
+        """
+        import pytest
+
+        def test_failing_subtests(subtests):
+            with subtests.test("failed subtest"):
+                raise Exception("should fail")
+            with subtests.test("success subtest"):
+                pass
+        """
+    )
+    # run pytest with the following cmd args
+    result = testdir.runpytest("--es-address=127.0.0.1:9200", "-v")
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines(["*::test_failing_subtests * SUBFAIL*"])
+    result.stdout.fnmatch_lines(["*::test_failing_subtests * SUBPASS*"])
+    result.stdout.fnmatch_lines(["*::test_failing_subtests PASSED*"])
+
+    # make sure that we get a '1' exit code for the testsuite
+    assert result.ret == 1
+
+    # validate each subtest is being reported on its own
+    report = json.loads(requests_mock.request_history[-2].text)
+    assert report["name"] == "test_subtests.py::test_failing_subtests"
+    assert "subtest" not in report
+    assert report["outcome"] == "passed"
+
+    report = json.loads(requests_mock.request_history[-3].text)
+    assert report["name"] == "test_subtests.py::test_failing_subtests"
+    assert report["subtest"] == "success subtest"
+    assert report["outcome"] == "passed"
+
+    report = json.loads(requests_mock.request_history[-4].text)
+    assert report["name"] == "test_subtests.py::test_failing_subtests"
+    assert report["subtest"] == "failed subtest"
+    assert report["outcome"] == "failure"
