Plugin Migration Guide
In AIV 2, each algorithm is designed as a standalone Python library and outputs a test result file that follows the `testresult.schema.json` format. A minimal migration would only require modifying an existing plugin to output the test result in the required format, which can then be uploaded to the AIV portal.
For compatibility with the full AIV 2 features, the following changes are required:
- Updating the dependencies to include `aiverify-test-engine`
- Modifying `__main__.py` to allow the algorithm to be called via a CLI
- Updating the relative reference from `test-engine-core-modules` to the new `aiverify_test_engine` library
- Modifying the output to follow the `testresult.schema.json` format
We have migrated `requirements.txt` to a `pyproject.toml` setup. `aiverify-test-engine` is now added as a dependency, and a CLI entry point is created.
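For illustration only, the relevant parts of such a `pyproject.toml` might look like the sketch below. The package name, version, and console-script mapping are placeholders rather than values taken from the actual plugin; adjust them to match your own project.

```toml
# Illustrative sketch only: names and versions are placeholders.
[project]
name = "aiverify-accumulated-local-effect"   # hypothetical package name
version = "2.0.0"                            # hypothetical version
dependencies = [
    "aiverify-test-engine",                  # the new test engine dependency
]

[project.scripts]
# Optional console-script mapping to the CLI entry point described below.
aiverify_accumulated_local_effect = "aiverify_accumulated_local_effect.plugin_init:run"
```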
We also renamed the following files:
- `accumulated_local_effect.meta.json` -> `algo.meta.json`
- `accumulated_local_effect.py` -> `algo.py`
- `plugin_test.py` -> `algo_init.py`
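After the renames, the plugin package looks roughly like the layout below. This tree is an assumption pieced together from the files named in this guide, not an exhaustive listing:

```
aiverify_accumulated_local_effect/
├── __main__.py        # CLI wrapper (shown below)
├── algo.meta.json     # was accumulated_local_effect.meta.json
├── algo.py            # was accumulated_local_effect.py
├── algo_init.py       # was plugin_test.py
├── input.schema.json
└── plugin_init.py     # new CLI entry point
pyproject.toml         # replaces requirements.txt
```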
A `plugin_init.py` file is created to allow the algorithm to be called via the CLI. Its inputs should be consistent with the specified `input.schema.json`, and it should call the underlying algorithm and test. Here is what the accumulated local effect CLI entry point looks like:
```python
import argparse

from aiverify_accumulated_local_effect.algo_init import AlgoInit
from aiverify_test_engine.plugins.enums.model_type import ModelType

parser = argparse.ArgumentParser(description="Run the plugin test with specified parameters.")


def run():
    parse_input_args()
    invoke_accumulated_local_effect_plugin()


def parse_input_args():
    global parser

    parser.add_argument("--data_path", required=True, help="Path to the data file.")
    parser.add_argument("--model_path", required=True, help="Path to the model file.")
    parser.add_argument("--ground_truth_path", required=True, help="Path to the ground truth data file.")
    parser.add_argument(
        "--ground_truth",
        required=True,
        help="The ground truth column name in the data.",
    )
    parser.add_argument(
        "--run_pipeline",
        action=argparse.BooleanOptionalAction,
        help="Whether to run the test as a pipeline (default: False).",
    )
    parser.add_argument(
        "--model_type",
        required=True,
        choices=["CLASSIFICATION", "REGRESSION"],
        help="The type of model (CLASSIFICATION or REGRESSION).",
    )
    parser.add_argument(
        "--core_modules_path",
        default="",
        help="Path to the core modules (default: empty).",
    )


def invoke_accumulated_local_effect_plugin():
    # =====================================================================================
    # NOTE: Do not modify the code below
    # =====================================================================================
    # Perform Plugin Testing
    # Parse the arguments
    args = parser.parse_args()

    # Determine the value of run_pipeline
    if args.run_pipeline is None:
        run_pipeline = False  # Default to False if not provided
    else:
        run_pipeline = args.run_pipeline

    # Map string argument to ModelType enum
    model_type = ModelType[args.model_type]

    print("*" * 20)
    # Debugging prints
    print(
        f"Running with the following arguments:\n"
        f"Data Path: {args.data_path}\n"
        f"Model Path: {args.model_path}\n"
        f"Ground Truth Path: {args.ground_truth_path}\n"
        f"Ground Truth: {args.ground_truth}\n"
        f"Run Pipeline: {run_pipeline}\n"
        f"Model Type: {model_type}\n"
        f"Core Modules Path: {args.core_modules_path}"
    )
    print("*" * 20)

    try:
        # Create an instance of AlgoInit with defined paths and arguments and run it.
        plugin_test = AlgoInit(
            run_pipeline,
            args.core_modules_path,
            args.data_path,
            args.model_path,
            args.ground_truth_path,
            args.ground_truth,
            model_type,
        )
        plugin_test.run()

    except Exception as exception:
        print(f"Exception caught while running the plugin test: {str(exception)}")


if __name__ == "__main__":
    run()
```
Next, we update the `__main__.py` file:
```python
import sys
from importlib.metadata import version
from pathlib import Path

from aiverify_accumulated_local_effect.plugin_init import run


def main() -> None:
    """
    Print the version of test engine core
    """
    print("*" * 20)
    print(version_msg())
    print("*" * 20)
    # invoke algorithm
    run()


def version_msg():
    """
    Return the accumulated_local_effect version, location and Python powering it.
    """
    python_version = sys.version
    location = Path(__file__).resolve().parent.parent
    return f"Accumulated Local Effect - {version('aiverify_accumulated_local_effect')} from \
{location} (Python {python_version})"


if __name__ == "__main__":
    main()
```
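With this wrapper in place, the plugin can be run as a module. The invocation below is a hypothetical example: the file paths and ground truth column name are placeholders, while the flags correspond to the argument parser defined above.

```bash
# Hypothetical invocation: replace the paths and column name with your own.
python -m aiverify_accumulated_local_effect \
  --data_path ./tests/data/sample.sav \
  --model_path ./tests/model/sample.sav \
  --ground_truth_path ./tests/data/sample.sav \
  --ground_truth target \
  --model_type CLASSIFICATION \
  --run_pipeline
```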
Existing references to `test-engine-core-modules` have to be updated to use the installed `aiverify_test_engine` package. For example, in `algo_init.py`:

```python
core_modules_path = "../../../../test-engine-core-modules"
```

is updated to:

```python
core_modules_path = Path(importlib.util.find_spec("aiverify_test_engine").origin).parent
```
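Note that this lookup assumes `importlib.util` and `pathlib.Path` are already imported in `algo_init.py`. A minimal, self-contained sketch of the resolution is:

```python
import importlib.util
from pathlib import Path

# Resolve the directory of the installed aiverify_test_engine package and
# use it as the core modules path instead of a relative source checkout.
spec = importlib.util.find_spec("aiverify_test_engine")
core_modules_path = Path(spec.origin).parent
print(core_modules_path)
```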
We write the successful result to an `output` folder with a `results.json` file that complies with the test result schema. You can use the new `ITestArguments` and `ITestResult` interfaces to verify that it follows the format. In our updated code, we created a `_generate_output_file` function that writes the JSON file:
```python
import json
from datetime import datetime
from pathlib import Path

from aiverify_test_engine.interfaces.itestresult import ITestArguments, ITestResult


def _generate_output_file(self, results, output_path: Path) -> None:
    """
    Format the output results into the AI Verify Test Result and write to a JSON file
    """
    with open(str(Path(__file__).parent / "algo.meta.json")) as f:
        meta_file = json.load(f)

    # Prepare test arguments
    test_arguments = ITestArguments(
        groundTruth=self._ground_truth,
        modelType=self._model_type.name,
        testDataset=self._data_path,
        modelFile=self._model_path,
        groundTruthDataset=self._ground_truth_path,
        algorithmArgs={"run_pipeline": self._run_as_pipeline},
        mode="upload",
    )

    # Create the output result
    output = ITestResult(
        gid=meta_file["gid"],
        cid=meta_file["cid"],
        version=meta_file.get("version"),
        startTime=datetime.fromtimestamp(self._start_time),
        timeTaken=round(self._time_taken, 4),
        testArguments=test_arguments,
        output=results,
    )
    output_json = output.model_dump_json(exclude_none=True, indent=4)
    # validate_test_result_schema is assumed to be imported elsewhere in the
    # module (e.g. from the test engine's schema validation utilities).
    if validate_test_result_schema(json.loads(output_json)) is True:
        with open(output_path, "w") as json_file:
            json_file.write(output_json)
    else:
        raise RuntimeError("Failed test result schema validation")
```
We can then write the file and associated artifacts if successful:
```python
if is_success:
    # Print the output results
    output_folder = Path.cwd() / "output"
    output_folder.mkdir(parents=True, exist_ok=True)

    json_file_path = output_folder / "results.json"
    self._generate_output_file(results, json_file_path)
    print("*" * 20)
    print(f"check the results here : {json_file_path}")
    print("*" * 20)
```