Skip to content

Commit

Permalink
Add verifier and fix test driver (#14)
Browse files Browse the repository at this point in the history
* Add verifier and testreport

* Updating verifier

* Verifier working with simple test report
  • Loading branch information
sven-oly authored Oct 14, 2022
1 parent 921b76e commit bfe3dfd
Show file tree
Hide file tree
Showing 9 changed files with 509 additions and 46 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
*~
18 changes: 13 additions & 5 deletions executors/python/executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,24 +7,31 @@
class executor():
def __init__(self):
self.test_count = 0
self.debug = 1
self.debug = None

def handle_input(self):
while True:
inline = sys.stdin.readline()
if self.debug:
print('#INPUT = >%s<' % inline)
if inline == "#EXIT":
if inline[0:5] == "#EXIT":
break
elif inline == '#VERSION':
elif inline[0:8] == '#VERSION':
platform_info = {
'platform': 'python3 executor',
'icuVersion': 'no version',
}
self.outline(platform_info)
elif inline[0:1] == '#':
unknown_info = {
'platform': 'python3 executor',
'message': 'unknown command',
'input': inline
}
self.outline(unknown_info)
else:
test_data = json.loads(inline)
test_data['test_IDq'] = 'TEST ' + test_data['label']
test_data['test_ID'] = 'TEST ' + test_data['label']
test_data['python3_executor'] = True
self.test_count += 1
self.outline(test_data)
Expand All @@ -33,7 +40,8 @@ def handle_input(self):

def outline(self, json_data):
# Simply output to stdout
sys.stdout.write(json.dumps(json_data) + '\n')
# sys.stdout.write(json.dumps(json_data) + '\n')
print(json.dumps(json_data))
return


Expand Down
98 changes: 64 additions & 34 deletions testdriver/ddtargs.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,63 +35,93 @@ def __init__(self):
'number_fmt', 'lang_names', 'ALL']

class DdtArgs():
def __init__(self):
def __init__(self, args):
self.options = None # A simple namespace with each field

self.parser = argparse.ArgumentParser(
description='Process DDT Test Driver arguments')

# All more than one item in types
self.parser.add_argument('--test_type', '--type', '-t',
action='extend', nargs='*',
choices=type_options)
# All more than one item in exec list
self.parser.add_argument('--exec', action='extend', nargs='*',
help='Execution platforms') #, default='ALL')
self.parser.add_argument('--icu', default='LATEST')
self.parser.add_argument('--cldr', default='LATEST')

# Location of the data files
self.parser.add_argument(
'--file_base', default="",
help='Base directory for input, output, and report paths')
self.parser.add_argument('--input_path', default='testData')
self.parser.add_argument('--output_path', default='testResults')
self.parser.add_argument('--report_path', default='testReports')

self.parser.add_argument('--exec_mode', default='one_test')
self.parser.add_argument('--parallel_mode', default=None)
self.parser.add_argument('--run_limit', default=None)
setCommonArgs(self.parser)

self.parser.add_argument(
'--start_test', default=0,
help='number of tests to skip at start of the test data')

self.parser.add_argument(
'--progress_interval',
help="Interval between progress output printouts", default=None)
self.parser.add_argument(
'--per_execution', default=1,
help='How many tests are run in each invocation of an executor')
self.parser.add_argument(
'--custom_testfile', default=None, action='extend', nargs='*',
help='full path to test data file in JSON format')

self.parser.add_argument('--debug_level', default=None)

# For handling verification
self.parser.add_argument('--verifyonly', default=None)
self.parser.add_argument('--noverify', default=None) #
self.parser.add_argument('--custom_verifier', default=None) #
self.options = self.parser.parse_args(args)

def parse(self, args):
# Sets the parameters based on argument values
def parse(self):
return self.options

def getOptions(self):
return self.options


class VerifyArgs():
    """Parses command-line arguments for the DDT verifier.

    Shares the common driver/verifier options via setCommonArgs and adds
    the verifier-only flags (--verify_file_name, --test_verifier).
    """

    def __init__(self, args):
        # args: list of command-line tokens (e.g. sys.argv[1:]).
        self.parser = argparse.ArgumentParser(
            description='Process DDT Verifier arguments')

        setCommonArgs(self.parser)
        self.parser.add_argument(
            '--verify_file_name', action='extend', nargs='*',
            help='Files with expected results for verifying', default=None)

        self.parser.add_argument(
            '--test_verifier',
            help='Flag to run in test mode', default=None)

        self.options = self.parser.parse_args(args)

    def getOptions(self):
        # Accessor for the parsed argparse namespace.
        # BUG FIX: the original defined getOptions twice back-to-back;
        # the duplicate silently shadowed the first and is removed here.
        return self.options

# Set up arguments common to both testDriver and verifier
def setCommonArgs(parser):
    """Add the arguments shared by the test driver and the verifier.

    Covers test selection (--test_type, --exec), library versions
    (--icu, --cldr), data file locations, execution tuning, and debugging.

    Args:
        parser: an argparse.ArgumentParser to which arguments are added.
    """
    # BUG FIX: removed two leftover debug print('!!!!!! setCommonArgs')
    # statements that polluted stdout on every driver/verifier start.

    # What data and executor(s) to verify.
    parser.add_argument('--test_type', '--type', '-t', '--test',
                        action='extend', nargs='*',
                        choices=type_options)
    # More than one execution platform may be given.
    parser.add_argument('--exec', action='extend', nargs='*',
                        help='Execution platforms')  # , default='ALL')

    # TODO: are these being used? How?
    parser.add_argument('--icu', default='LATEST')
    parser.add_argument('--cldr', default='LATEST')

    # Location of the data files.
    parser.add_argument(
        '--file_base', default="",
        help='Base directory for input, output, and report paths')
    parser.add_argument('--input_path', default='testData')
    parser.add_argument('--output_path', default='testResults')
    parser.add_argument('--report_path', default='testReports')

    parser.add_argument('--exec_mode', default='one_test')
    parser.add_argument('--parallel_mode', default=None)
    parser.add_argument('--run_limit', default=None)

    parser.add_argument(
        '--custom_testfile', default=None, action='extend', nargs='*',
        help='full path to test data file in JSON format')

    parser.add_argument(
        '--progress_interval',
        help="Interval between progress output printouts", default=None)

    parser.add_argument('--debug_level', default=None)


def argsTestData():
Expand Down
8 changes: 4 additions & 4 deletions testdriver/testdriver.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ def setArgs(self, argOptions):

try:
testData = ddtData.testDatasets[test_type]
print('#### testData[%s] = %s' % (test_type, testData))
newPlan.setTestData(testData)
except KeyError as err:
print('!!! No test data filename for %s' % test_type)
Expand All @@ -80,9 +79,9 @@ def parseArgs(self, args):
# to execute tests and verify results.

# Get all the arguments
argparse = ddtargs.DdtArgs()
argparse.parse(args)
# DEBUG print('OPTIONS: %s' % argparse.getOptions())
argparse = ddtargs.DdtArgs(args)
if self.debug:
print('OPTIONS: %s' % argparse.getOptions())

# Now use the argparse.options to set the values in the driver
self.setArgs(argparse.getOptions())
Expand All @@ -101,6 +100,7 @@ def runPlans(self):
def main(args):

driver = TestDriver()
print('ARGS = %s' % (args))
driver.parseArgs(args[1:])

driver.runPlans()
Expand Down
7 changes: 4 additions & 3 deletions testdriver/testplan.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,8 +310,8 @@ def processBatchOfTests(self, testsToSend):
json_out = json.loads(item)
batchOut.append(json_out)
except BaseException as error:
print(' && Item %s. Error in= %s. Received: >%s<' %
(index, error, item))
print(' && Item %s. Error in= %s. Received (%d): >%s<' %
(index, error, len(item), item))
index += 1

return batchOut
Expand Down Expand Up @@ -378,7 +378,8 @@ def sendOneLine(self, input_line):
if not result.returncode:
return result.stdout
else:
print('$$$$$$$$$$$$$$$$ return code = %s' % result.returncode)
print('$$$$$$$$$$$$$$$$ ---> return code: %s' % result.returncode)

print(' ----> INPUT LINE= >%s<' % input_line)
print(' ----> STDOUT= >%s<' % result.stdout)
self.run_error_message = '!!!! ERROR IN EXECUTION: %s. STDERR = %s' %(
Expand Down
Empty file added verifier/__init__.py
Empty file.
75 changes: 75 additions & 0 deletions verifier/testreport.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
import json

class TestReport():
    """Accumulates the results of running tests against expected values.

    Tracks passing, failing, and erroring tests plus tests with missing
    verification data, and can render/save a JSON summary report.
    """

    def __init__(self):
        self.timestamp = None
        self.results = None
        self.verify = None

        self.title = ''

        # Where saveReport() writes the JSON summary.
        self.report_file_path = None
        self.number_tests = None
        self.failing_tests = []
        self.tests_fail = 0
        self.passing_tests = []
        self.test_errors = []
        self.error_count = 0
        self.tests_pass = 0

        self.test_type = None
        self.executor = None

        self.platform = None

        self.missing_verify_data = []

    def recordFail(self, test):
        # Remember the failing test and bump the failure counter.
        self.failing_tests.append(test)
        self.tests_fail += 1

    def recordPass(self, test):
        # Remember the passing test and bump the pass counter.
        self.passing_tests.append(test)
        self.tests_pass += 1

    def recordTestError(self, test):
        # Remember a test that errored out (could not be compared).
        self.test_errors.append(test)
        self.error_count += 1

    def recordMissingVerifyData(self, test):
        # Remember a test that had no expected value to verify against.
        self.missing_verify_data.append(test)

    def summaryStatus(self):
        """Return True when no test failed and no verify data was missing.

        BUG FIX: the original read 'self.tests_faile' (typo), which raised
        AttributeError on every call.
        """
        return self.tests_fail == 0 and self.missing_verify_data == []

    def createReport(self):
        """Build the report dict, cache it on self.report, and return it as JSON."""
        report = {}

        # Fill in the important fields.
        report['title'] = self.title

        report['timestamp'] = self.timestamp
        report['failCount'] = self.tests_fail
        report['passCount'] = self.tests_pass
        report['failingTests'] = self.failing_tests
        report['missing_verify_data'] = self.missing_verify_data
        report['test_error_count'] = self.error_count
        report['test_errors'] = self.test_errors
        self.report = report

        return json.dumps(report)

    def saveReport(self):
        """Write the JSON report to self.report_file_path.

        Returns None on I/O failure after logging to stderr.
        """
        self.createReport()
        try:
            # BUG FIX: original wrote 'std.err.write' (a NameError) in the
            # error path and never closed the file handle on success.
            with open(self.report_file_path, mode='w', encoding='utf-8') as file:
                file.write(json.dumps(self.report))
        except BaseException as err:
            sys.stderr.write('!!! Cannot write report at %s: Error = %s' % (
                self.report_file_path, err))
            return None
Loading

0 comments on commit bfe3dfd

Please sign in to comment.