feat: filtering specific checks #140

Closed
Changes from all commits (20 commits)
31 changes: 27 additions & 4 deletions gatorgrade/input/parse_config.py
@@ -1,19 +1,25 @@
"""Returns the list of commands to be run through gatorgrader."""

from pathlib import Path
from thefuzz import fuzz


from gatorgrade.input.command_line_generator import generate_checks
from gatorgrade.input.in_file_path import parse_yaml_file
from gatorgrade.input.in_file_path import reformat_yaml_data


def parse_config(file: Path):
def parse_config(file: Path, check_include: str = None, check_exclude: str = None):
"""Parse the input yaml file and generate specified checks.

Args:
file: Yaml file containing gatorgrade and shell command checks
check_include: Description of checks to include
check_exclude: Description of checks to exclude
Returns:
Returns a dictionary that specifies shell commands and gatorgrade commands
Returns a tuple that contains:
- A dictionary specifying shell commands and gatorgrade commands
- A boolean variable indicating if there is a match for the specified checks
"""
# parse the YAML file using parse_yaml_file provided by gatorgrade
parsed_yaml_file = parse_yaml_file(file)
@@ -24,8 +30,25 @@ def parse_config(file: Path):
# use it to generate all of the checks;
# these will be valid checks that are now
# ready for execution with this tool
parse_con = generate_checks(reformat_yaml_data(parsed_yaml_file))
return parse_con
reformatted_yaml_data = reformat_yaml_data(parsed_yaml_file)
match = True
# Filter reformatted_yaml_data to only include the specified checks
if check_include:
# Generate the checks that are included
check_list = [check for check in reformatted_yaml_data if fuzz.partial_ratio(check_include, check[1]['description']) >= 80]
match = True if len(check_list) > 0 else False
parse_con = generate_checks(check_list)
return (parse_con, match)

if check_exclude:
# Generate the checks that are excluded
check_list = [check for check in reformatted_yaml_data if fuzz.partial_ratio(check_exclude, check[1]['description']) < 80]
match = True if len(check_list) > 0 else False
parse_con = generate_checks(check_list)
return (parse_con, match)

parse_con = generate_checks(reformatted_yaml_data)
return (parse_con, match)
# return an empty list because the parsing
# process did not return a list with content;
# allow the calling function to handle the empty list
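
The include/exclude logic above keys on thefuzz's partial_ratio score against each check's description, with 80 as the cutoff. Below is a minimal standalone sketch of that matching behavior; the descriptions are invented for illustration, not checks from a real gatorgrade.yml.

from thefuzz import fuzz

descriptions = [
    "Complete all TODOs in writing/reflection.md",
    "Use an f-string in the main module",
]

# partial_ratio returns an integer from 0 to 100 based on the best-matching
# substring alignment; an exact substring match scores 100
for description in descriptions:
    score = fuzz.partial_ratio("Complete all TODOs", description)
    # mirror the >= 80 cutoff used in parse_config above
    print(description, score, score >= 80)

Because the comparison is fuzzy rather than an exact substring test, a slightly misspelled filter can still select the intended check, which appears to be the motivation for pulling in thefuzz.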
95 changes: 49 additions & 46 deletions gatorgrade/main.py
@@ -1,5 +1,3 @@
"""Use Typer to run gatorgrade to run the checks and generate the yml file."""

import sys
from pathlib import Path
from typing import Tuple
@@ -12,21 +10,14 @@

# create an app for the Typer-based CLI

# define the emoji that will be prepended to the help message
gatorgrade_emoji = "🐊"

# create a Typer app that
# --> does not support completion
# --> has a specified help message with an emoji
app = typer.Typer(
add_completion=False,
help=f"{gatorgrade_emoji} Run the GatorGrader checks in the specified gatorgrade.yml file.",
)

# create a default console for printing with rich
console = Console()

# define constants used in this module
FILE = "gatorgrade.yml"
FAILURE = 1

@@ -45,55 +36,67 @@ def gatorgrade(
3. the name of the file or environment variable\
4. use 'env md GITHUB_STEP_SUMMARY' to create GitHub job summary in GitHub Action",
),
output_limit: int = typer.Option(
None,
"--output-limit",
"-l",
help="The maximum number of lines to store in an environment variable. Example: '--output-limit 1000'",
),
check_include: str = typer.Option(
None,
"--check-include",
"-i",
help="Description of the checks to include. Example: '--check-include \"Complete all TODOs\"'",
),
check_exclude: str = typer.Option(
None,
"--check-exclude",
"-e",
help="Description of the checks to exclude. Example: '--check-exclude \"Complete all TODOs\"'",
),
check_status: str = typer.Option(
None,
"--check-status",
"-s",
help="Filter checks by their status (pass or fail). Example: '--check-status pass'",
),
show_failures: bool = typer.Option(
False,
"--show-failures",
"-f",
help="Only show the failed checks.",
),
):
"""Run the GatorGrader checks in the specified gatorgrade.yml file."""
# if ctx.invoked_subcommand is None then this means
# that, by default, gatorgrade should run in checking mode
if ctx.invoked_subcommand is None:
# parse the provided configuration file
checks = parse_config(filename)
# there are valid checks and thus the
# tool should run them with run_checks
(checks, match) = parse_config(filename, check_include, check_exclude)

if len(checks) > 0:
checks_status = run_checks(checks, report)
# no checks were created and this means
# that, most likely, the file was not
# valid and thus the tool cannot run checks
checks_status = run_checks(
checks, report, output_limit, check_status, show_failures
)
else:
checks_status = False
console.print()
console.print(f"The file {filename} either does not exist or is not valid.")
if match is False:
if check_include:
console.print(
f"The check {check_include} does not exist in the file {filename}."
)
if check_exclude:
console.print(
f"The check {check_exclude} does not exist in the file {filename}."
)
else:
console.print(
f"The file {filename} either does not exist or is not valid."
)
console.print("Exiting now!")
console.print()
# at least one of the checks did not pass or
# the provided file was not valid and thus
# the tool should return a non-zero exit
# code to designate some type of failure

if checks_status is not True:
sys.exit(FAILURE)


# @app.command()
# def generate(
# root: Path = typer.Argument(
# Path("."),
# help="Root directory of the assignment",
# exists=True,
# dir_okay=True,
# writable=True,
# ),
# paths: List[Path] = typer.Option(
# ["*"],
# help="Paths to recurse through and generate checks for",
# exists=False,
# ),
# ):
# """Generate a gatorgrade.yml file."""
# targets = []
# for path in paths:
# targets.extend(glob.iglob(path.as_posix(), recursive=True))
# generate_config(targets, root.as_posix())


if __name__ == "__main__":
app()
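
As a usage sketch, the new flags can be exercised through Typer's test runner rather than a shell; this assumes the gatorgrade package is importable from the current environment, and the filter text is illustrative.

from typer.testing import CliRunner

from gatorgrade.main import app

runner = CliRunner()
# keep only the checks whose description fuzzily matches the given text
# and print only the ones that fail
result = runner.invoke(app, ["--check-include", "Complete all TODOs", "--show-failures"])
print(result.output)
# a non-zero exit code means a check failed, no check matched the filter,
# or the configuration file was invalid
print(result.exit_code)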
165 changes: 96 additions & 69 deletions gatorgrade/output/output.py
@@ -198,8 +198,39 @@ def create_markdown_report_file(json: dict) -> str:
return markdown_contents


def truncate_report(report_output_data_json: dict, output_limit: int = None) -> str:
"""Truncate the json report to the maximum number of lines allowed.

Args:
report_output_data_json: the json dictionary that will be used or converted to md
output_limit: the maximum number of lines to display in the output
"""
# Convert the JSON dictionary to a formatted string
report_str = json.dumps(report_output_data_json, indent=4)

# Split the string into lines
report_lines = report_str.split("\n")

# If the number of lines is within the limit, return the full report
if output_limit is None or len(report_lines) <= output_limit:
return report_str

# Otherwise, truncate the report to the maximum number of lines
truncated_report_lines = report_lines[:output_limit]

# Convert the truncated report back to a JSON string
truncated_report_str = "\n".join(truncated_report_lines)

# Add a trailing ellipsis to indicate the report was truncated
truncated_report_str += "\n..."

return truncated_report_str
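
To make the truncation shape concrete, here is a small self-contained sketch of the same approach on an invented report dictionary:

import json

report = {"passed": 1, "failed": 1, "checks": ["check one", "check two"]}
report_lines = json.dumps(report, indent=4).split("\n")

# keep only the first output_limit lines and mark the cut with an
# ellipsis, as truncate_report does above
output_limit = 3
print("\n".join(report_lines[:output_limit]) + "\n...")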


def configure_report(
report_params: Tuple[str, str, str], report_output_data_json: dict
report_params: Tuple[str, str, str],
report_output_data_json: dict,
output_limit: int = None,
):
"""Put together the contents of the report depending on the inputs of the user.

@@ -209,6 +240,7 @@ def configure_report(
report_params[1]: json or md
report_params[2]: name of the file or env
report_output_data: the json dictionary that will be used or converted to md
output_limit: the maximum number of lines to display in the output
"""
report_format = report_params[0]
report_type = report_params[1]
@@ -285,95 +317,90 @@ def write_json_or_md_file(file_name, content_type, content):


def run_checks(
checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str]
checks: List[Union[ShellCheck, GatorGraderCheck]],
report: Tuple[str, str, str],
output_limit: int = None,
check_status: str = None,
show_failures: bool = False,
check_include: str = None,
check_exclude: str = None,
) -> bool:
"""Run shell and GatorGrader checks and display whether each has passed or failed.

Also, print a list of all failed checks with their diagnostics and a summary message that
shows the overall fraction of passed checks.

Args:
checks: The list of shell and GatorGrader checks to run.
report: The report format, type, and destination for persisted output.
output_limit: The maximum number of lines to keep in the report output.
check_status: Only record checks with this status ("pass" or "fail").
show_failures: Only display the checks that failed.
check_include: Only display checks whose description contains this text.
check_exclude: Hide checks whose description contains this text.
"""
results = []
# run each of the checks

# Run checks and gather results
for check in checks:
result = None
command_ran = None
# run a shell check; this means
# that it is going to run a command
# in the shell as a part of a check;
# store the command that ran in the
# field called run_command that is
# inside of a CheckResult object but
# not initialized in the constructor

# Run shell or GatorGrader checks
if isinstance(check, ShellCheck):
result = _run_shell_check(check)
command_ran = check.command
result.run_command = command_ran
# run a check that GatorGrader implements
elif isinstance(check, GatorGraderCheck):
result = _run_gg_check(check)
# check to see if there was a command in the
# GatorGraderCheck. This code finds the index of the
# word "--command" in the check.gg_args list if it
# is available (it is not available for all of
# the various types of GatorGraderCheck instances),
# and then it adds 1 to that index to get the actual
# command run and then stores that command in the
# result.run_command field that is initialized to
# an empty string in the constructor for CheckResult
if "--command" in check.gg_args:
index_of_command = check.gg_args.index("--command")
index_of_new_command = int(index_of_command) + 1
index_of_new_command = index_of_command + 1
result.run_command = check.gg_args[index_of_new_command]
# there were results from running checks
# and thus they must be displayed
if result is not None:
result.print()
results.append(result)
# determine if there are failures and then display them
failed_results = list(filter(lambda result: not result.passed, results))
# print failures list if there are failures to print
# and print what ShellCheck command that Gatorgrade ran
if len(failed_results) > 0:
print("\n-~- FAILURES -~-\n")
for result in failed_results:
# main.console.print("This is a result")
# main.console.print(result)
result.print(show_diagnostic=True)
# this result is an instance of CheckResult
# that has a run_command field that is some
# value that is not the default of an empty
# string and thus it should be displayed;
# the idea is that displaying this run_command
# will give the person using Gatorgrade a way
# to quickly run the command that failed
if result.run_command != "":
rich.print(
f"[blue] → Run this command: [green]{result.run_command}\n"
)
# determine how many of the checks passed and then
# compute the total percentage of checks passed
passed_count = len(results) - len(failed_results)
# prevent division by zero if no results
if len(results) == 0:
percent = 0

if result:
# Filter checks by status if specified
if check_status == "pass" and result.passed:
results.append(result)
elif check_status == "fail" and not result.passed:
results.append(result)
elif not check_status: # No specific status filter
results.append(result)

# Filter by include/exclude criteria
filtered_results = results
if check_include:
filtered_results = [
r for r in filtered_results if check_include in r.description
]

if check_exclude:
filtered_results = [
r for r in filtered_results if check_exclude not in r.description
]

# Print results based on the filtered results
if show_failures:
# Print only failures
for result in filtered_results:
if not result.passed:
result.print(show_diagnostic=True)
if result.run_command:
rich.print(
f"[blue] → Run this command: [green]{result.run_command}\n"
)
else:
percent = round(passed_count / len(results) * 100)
# if the report is wanted, create output in line with their specifications
# Print all results
for result in filtered_results:
if not result.passed:
result.print(show_diagnostic=True)
if result.run_command:
rich.print(
f"[blue] → Run this command: [green]{result.run_command}\n"
)
else:
result.print() # Print normally for passing checks

# Generate summary
failed_results = [r for r in results if not r.passed]
passed_count = len(results) - len(failed_results)
percent = round(passed_count / len(results) * 100) if results else 0

if all(report):
report_output_data = create_report_json(passed_count, results, percent)
configure_report(report, report_output_data)
# compute summary results and display them in the console

summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!"
summary_color = "green" if passed_count == len(results) else "bright white"
print_with_border(summary, summary_color)
# determine whether or not the run was a success or not:
# if all of the tests pass then the function returns True;
# otherwise the function must return False
summary_status = True if passed_count == len(results) else False
return summary_status

return passed_count == len(results)
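
The status filter above reduces to a small predicate over each result. Here is a standalone sketch of that selection logic, using a stand-in for the real CheckResult type:

from types import SimpleNamespace

results = [
    SimpleNamespace(description="Complete all TODOs", passed=True),
    SimpleNamespace(description="Use an f-string", passed=False),
]

def keep(result, check_status=None):
    # with no filter every result is kept; otherwise the result's
    # status must agree with the requested "pass" or "fail" value
    if check_status == "pass":
        return result.passed
    if check_status == "fail":
        return not result.passed
    return True

# prints only the description of the failing check
print([r.description for r in results if keep(r, "fail")])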


def print_with_border(text: str, rich_color: str):