
Chore: Minor refactors in sqlmesh test output #4755

Merged · 1 commit · Jun 19, 2025
96 changes: 50 additions & 46 deletions sqlmesh/core/console.py
@@ -1957,26 +1957,29 @@ def log_test_results(self, result: ModelTextTestResult, target_dialect: str) ->
divider_length = 70

self._log_test_details(result)
self._print("\n")

message = (
f"Ran {result.testsRun} tests against {target_dialect} in {result.duration} seconds."
)
if result.wasSuccessful():
self._print("=" * divider_length)
self._print(
f"Successfully Ran {str(result.testsRun)} tests against {target_dialect}",
f"Successfully {message}",
style="green",
)
self._print("-" * divider_length)
else:
self._print("-" * divider_length)
self._print("Test Failure Summary")
self._print("Test Failure Summary", style="red")
self._print("=" * divider_length)
self._print(
f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}"
)
failures = len(result.failures) + len(result.errors)
self._print(f"{message} \n")

self._print(f"Failed tests ({failures}):")
for test, _ in result.failures + result.errors:
if isinstance(test, ModelTest):
self._print(f"Failure Test: {test.path}::{test.test_name}")
self._print("=" * divider_length)
self._print(f" {test.path}::{test.test_name}")
self._print("=" * divider_length, end="\n\n")

def _captured_unit_test_results(self, result: ModelTextTestResult) -> str:
with self.console.capture() as capture:
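
For context, a minimal sketch of the failure summary this hunk now produces, using plain print and a stand-in result object rather than SQLMesh's ModelTextTestResult; the dialect, paths, and values are illustrative:

# Stand-in for ModelTextTestResult, with just enough fields for the summary.
class FakeResult:
    testsRun = 3
    duration = 0.12
    failures = [("tests/test_a.yaml::test_a", "traceback ...")]
    errors: list = []

result = FakeResult()
divider_length = 70
message = f"Ran {result.testsRun} tests against duckdb in {result.duration} seconds."
failed = len(result.failures) + len(result.errors)

print("-" * divider_length)
print("Test Failure Summary")  # the real console styles this red
print("=" * divider_length)
print(f"{message} \n")
print(f"Failed tests ({failed}):")
for path_and_name, _ in result.failures + result.errors:
    print(f"  {path_and_name}")
print("=" * divider_length, end="\n\n")
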
@@ -2499,19 +2502,24 @@ def show_linter_violations(
else:
self.log_warning(msg)

def _log_test_details(self, result: ModelTextTestResult) -> None:
def _log_test_details(
self, result: ModelTextTestResult, unittest_char_separator: bool = True
) -> None:
"""
This helper encapsulates the logic for logging the unittest details of a test result.
The top-level method (`log_test_results`) reuses `_log_test_details` differently depending on the console.

Args:
result: The unittest test result, which contains metrics such as the number of successes, failures, etc.
"""
tests_run = result.testsRun

if result.wasSuccessful():
self._print("\n", end="")
return

errors = result.errors
failures = result.failures
skipped = result.skipped
is_success = not (errors or failures)

infos = []
if failures:
@@ -2521,12 +2529,13 @@ def _log_test_details(self, result: ModelTextTestResult) -> None:
if skipped:
infos.append(f"skipped={skipped}")

self._print("\n", end="")
if unittest_char_separator:
self._print(f"\n{unittest.TextTestResult.separator1}\n\n", end="")

for (test_case, failure), test_failure_tables in zip_longest( # type: ignore
failures, result.failure_tables
):
self._print(unittest.TextTestResult.separator1)
self._print(unittest.TextTestResult.separator2)
self._print(f"FAIL: {test_case}")

if test_description := test_case.shortDescription():
@@ -2541,21 +2550,11 @@ def _log_test_details(self, result: ModelTextTestResult) -> None:
self._print("\n", end="")

for test_case, error in errors:
self._print(unittest.TextTestResult.separator1)
self._print(unittest.TextTestResult.separator2)
self._print(f"ERROR: {test_case}")
self._print(f"{unittest.TextTestResult.separator2}")
self._print(error)

# Output final report
self._print(unittest.TextTestResult.separator2)
test_duration_msg = f" in {result.duration:.3f}s" if result.duration else ""
self._print(
f"\nRan {tests_run} {'tests' if tests_run > 1 else 'test'}{test_duration_msg} \n"
)
self._print(
f"{'OK' if is_success else 'FAILED'}{' (' + ', '.join(infos) + ')' if infos else ''}"
)


def _cells_match(x: t.Any, y: t.Any) -> bool:
"""Helper function to compare two cells and returns true if they're equal, handling array objects."""
@@ -2836,14 +2835,19 @@ def log_test_results(self, result: ModelTextTestResult, target_dialect: str) ->
"font-weight": "bold",
"font-family": "Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace",
}

message = (
f"Ran {result.testsRun} tests against {target_dialect} in {result.duration} seconds."
)

if result.wasSuccessful():
success_color = {"color": "#008000"}
header = str(h("span", {"style": shared_style}, "-" * divider_length))
message = str(
h(
"span",
{"style": {**shared_style, **success_color}},
f"Successfully Ran {str(result.testsRun)} tests against {target_dialect}",
f"Successfully {message}",
)
)
footer = str(h("span", {"style": shared_style}, "=" * divider_length))
@@ -2855,31 +2859,31 @@ def log_test_results(self, result: ModelTextTestResult, target_dialect: str) ->
fail_shared_style = {**shared_style, **fail_color}
header = str(h("span", {"style": fail_shared_style}, "-" * divider_length))
message = str(h("span", {"style": fail_shared_style}, "Test Failure Summary"))
num_success = str(
h(
"span",
{"style": fail_shared_style},
f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}",
failed_tests = [
str(
h(
"span",
{"style": fail_shared_style},
f"Failed tests ({len(result.failures) + len(result.errors)}):",
)
)
)
failure_tests = []
]

for test, _ in result.failures + result.errors:
if isinstance(test, ModelTest):
failure_tests.append(
failed_tests.append(
str(
h(
"span",
{"style": fail_shared_style},
f"Failure Test: {test.model.name} {test.test_name}",
f" {test.model.name}::{test.test_name}",
)
)
)
failures = "<br>".join(failure_tests)
failures = "<br>".join(failed_tests)
footer = str(h("span", {"style": fail_shared_style}, "=" * divider_length))
error_output = widgets.Textarea(output, layout={"height": "300px", "width": "100%"})
test_info = widgets.HTML(
"<br>".join([header, message, footer, num_success, failures, footer])
)
test_info = widgets.HTML("<br>".join([header, message, footer, failures, footer]))
self.display(widgets.VBox(children=[test_info, error_output], layout={"width": "100%"}))
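
In the notebook console, the summary becomes a column of styled spans above a Textarea holding the captured unittest output. A rough sketch using ipywidgets directly, with plain HTML strings standing in for SQLMesh's h helper; the color, model name, and test name are illustrative (run inside Jupyter to render it):

import ipywidgets as widgets

style = "font-size: 11px; font-weight: bold; color: #cc0000"
parts = [
    f'<span style="{style}">{"-" * 70}</span>',
    f'<span style="{style}">Test Failure Summary</span>',
    f'<span style="{style}">{"=" * 70}</span>',
    f'<span style="{style}">Failed tests (1):</span>',
    f'<span style="{style}"> sqlmesh_example.full_model::test_example_full_model</span>',
    f'<span style="{style}">{"=" * 70}</span>',
]
test_info = widgets.HTML("<br>".join(parts))
error_output = widgets.Textarea("FAIL: test_example_full_model ...", layout={"height": "300px", "width": "100%"})
widgets.VBox(children=[test_info, error_output], layout={"width": "100%"})
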


@@ -3202,21 +3206,21 @@ def log_success(self, message: str) -> None:
self._print(message)

def log_test_results(self, result: ModelTextTestResult, target_dialect: str) -> None:
message = f"Ran `{result.testsRun}` Tests Against `{target_dialect}`"

if result.wasSuccessful():
self._print(
f"**Successfully Ran `{str(result.testsRun)}` Tests Against `{target_dialect}`**\n\n"
)
self._print(f"**Successfully {message}**\n\n")
else:
self._print("```")
self._log_test_details(result)
self._log_test_details(result, unittest_char_separator=False)
self._print("```\n\n")

self._print(
f"**Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}**\n\n"
)
failures = len(result.failures) + len(result.errors)
self._print(f"**{message}**\n")
self._print(f"**Failed tests ({failures}):**")
for test, _ in result.failures + result.errors:
if isinstance(test, ModelTest):
self._print(f"* Failure Test: `{test.model.name}` - `{test.test_name}`\n\n")
self._print(f"`{test.model.name}`::`{test.test_name}`\n\n")

def log_skipped_models(self, snapshot_names: t.Set[str]) -> None:
if snapshot_names:
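
A sketch of the markdown a failing run now emits from this console; the model and test names are made up, and the fenced block stands in for the captured _log_test_details output:

tests_run, target_dialect = 2, "duckdb"
failures = [("sqlmesh_example.full_model", "test_example_full_model")]
message = f"Ran `{tests_run}` Tests Against `{target_dialect}`"

print("```")
print("FAIL: test_example_full_model ...")  # captured unittest details
print("```\n")
print(f"**{message}**\n")
print(f"**Failed tests ({len(failures)}):**")
for model, name in failures:
    print(f"`{model}`::`{name}`\n")
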
2 changes: 1 addition & 1 deletion sqlmesh/core/test/runner.py
@@ -174,6 +174,6 @@ def _run_single_test(

end_time = time.perf_counter()

combined_results.duration = end_time - start_time
combined_results.duration = round(end_time - start_time, 2)

return combined_results
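
The duration is now rounded to two decimal places before being stored on the result, so the consoles can interpolate it directly into their "in {duration} seconds" messages. A quick illustration with time.perf_counter:

import time

start_time = time.perf_counter()
sum(range(1_000_000))  # stand-in for running the test suite
end_time = time.perf_counter()

duration = round(end_time - start_time, 2)  # e.g. 0.03 instead of 0.0312489...
print(f"Ran 3 tests against duckdb in {duration} seconds.")
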
10 changes: 3 additions & 7 deletions tests/core/test_test.py
@@ -2281,7 +2281,7 @@ def test_test_output(tmp_path: Path) -> None:
)

assert "Ran 2 tests" in output
assert "FAILED (failures=1)" in output
assert "Failed tests (1):" in output

# Case 2: Ensure that the verbose log report is structured correctly
with capture_output() as captured_output:
@@ -2321,7 +2321,7 @@ def test_test_output(tmp_path: Path) -> None:
output = captured_output.stdout

assert "Ran 102 tests" in output
assert "FAILED (failures=51)" in output
assert "Failed tests (51):" in output

# Case 4: Test that wide tables are split into even chunks for default verbosity
rmtree(tmp_path / "tests")
@@ -2426,11 +2426,7 @@ def test_test_output_with_invalid_model_name(tmp_path: Path) -> None:
f"""Model '"invalid_model"' was not found at {wrong_test_file}"""
in mock_logger.call_args[0][0]
)
assert (
".\n----------------------------------------------------------------------\n\nRan 1 test in"
in output.stdout
)
assert "OK" in output.stdout
assert "Successfully Ran 1 test" in output.stdout


def test_number_of_tests_found(tmp_path: Path) -> None:
2 changes: 1 addition & 1 deletion tests/integrations/github/cicd/test_github_commands.py
@@ -477,7 +477,7 @@ def test_run_all_test_failed(
assert (
"""sqlmesh.utils.errors.TestError: some error""" in test_checks_runs[2]["output"]["summary"]
)
assert """**Num Successful Tests: 0**""" in test_checks_runs[2]["output"]["summary"]
assert """Failed tests (1):""" in test_checks_runs[2]["output"]["summary"]

assert "SQLMesh - Prod Plan Preview" in controller._check_run_mapping
prod_plan_preview_checks_runs = controller._check_run_mapping[
1 change: 1 addition & 0 deletions tests/integrations/github/cicd/test_integration.py
@@ -1853,6 +1853,7 @@ def test_pr_delete_model(
assert GithubCheckStatus(test_checks_runs[2]["status"]).is_completed
assert GithubCheckConclusion(test_checks_runs[2]["conclusion"]).is_success
assert test_checks_runs[2]["output"]["title"] == "Tests Passed"
print(test_checks_runs[2]["output"]["summary"])
assert (
test_checks_runs[2]["output"]["summary"].strip()
== "**Successfully Ran `3` Tests Against `duckdb`**"