gtester.py
#!/usr/bin/env python3
"""gtester.py: a thin wrapper around a Google Test binary that tags failing
tests with an index and stores them in .gtester/config.json so they can be
re-run selectively."""
import subprocess
import sys
import json
from pathlib import Path

# Always request colored output; the parser below keys on the ANSI escape
# sequences gtest emits in that mode.
gcolor = "--gtest_color=yes"
gtest_args = [gcolor]

# Markers that appear in gtest's end-of-run summary.
failed_indicator = "[  FAILED  ] "
passed_indicator = "[  PASSED  ] "
summary_indicator = "[==========] "

# Stored state: the last executable and its failing tests.
config_path_prefix = Path(".gtester")
config_name = "config.json"
config_path_prefix.mkdir(exist_ok=True)
config_path = config_path_prefix / config_name

# In colored output the test name follows "] " plus the ANSI reset sequence.
test_suit_separator = "] \x1b[m"

# Command-line flags understood by this script.
failing_flag = "--run_failing"
exe_flag = "--exe"
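# For orientation: the tail of a gtest run that the markers above match looks
# roughly like this (test names illustrative, spacing approximate):
#
#   [==========] 3 tests from 1 test suite ran. (12 ms total)
#   [  PASSED  ] 2 tests.
#   [  FAILED  ] 1 test, listed below:
#   [  FAILED  ] FooTest.Bar
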
def run_tests(exe, tests=dict(), override_failing_storage=True):
    check_for_failed = True       # stays True until the "[  PASSED  ]" summary line appears
    tag_test = False              # set once a "[==========]" summary line appears
    skip_first_failed_tag = True  # the first FAILED summary line is the "listed below:" header
    if len(tests) == 0:
        gtest_filter = "*"
    else:
        gtest_filter = ":".join(tests.values())
    gtest_args.append('--gtest_filter=' + gtest_filter)
    args = [exe] + gtest_args
    popen = subprocess.Popen(" ".join(args), shell=True,
                             stdout=subprocess.PIPE, universal_newlines=True)
    tests_output = dict()
    count = 1
    for line in popen.stdout:
        pl = line[:-1]  # drop the trailing newline
        if passed_indicator in pl:
            check_for_failed = False
        if summary_indicator in pl:
            tag_test = True
            print(pl)
        elif tag_test and not check_for_failed and failed_indicator in pl:
            # A failed test listed in the summary: the name follows the
            # colored "[  FAILED  ] " prefix and its ANSI reset sequence.
            p = pl.find(test_suit_separator)
            test_str = pl[p + len(test_suit_separator):]
            if skip_first_failed_tag:
                # "[  FAILED  ] N tests, listed below:" header line.
                skip_first_failed_tag = False
                print(pl)
                continue
            if gtest_filter == "*":
                indicator_count = count
            else:
                # Re-running stored failures: keep the index assigned last time.
                for k, v in tests.items():
                    if test_str == v:
                        indicator_count = k
                        break
            indicator = '\33[91m' + '[' + str(indicator_count) + '] ' + '\033[0m'
            print(pl + " " + indicator)
            tests_output[str(indicator_count)] = test_str
            count += 1
        else:
            print(pl)
    if override_failing_storage:
        # Remember the executable and its failed tests for later re-runs.
        output = dict()
        output['exe'] = exe
        output['tests'] = tests_output
        with open(config_path, "w") as f:
            f.write(json.dumps(output))

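# The stored .gtester/config.json written above has roughly this shape
# (executable path and test names are hypothetical):
#
#   {"exe": "./build/unit_tests", "tests": {"1": "FooTest.Bar", "2": "FooTest.Baz"}}
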
def run_all_tests():
    # Run every test of the executable stored in the config file.
    if config_path.exists():
        with open(config_path) as f:
            config_data = json.load(f)
        exe = config_data['exe']
        run_tests(exe)
    else:
        print("Please provide exe using --exe")


def run_failed_tests(exe, tests):
    # Re-run previously failed tests without overwriting the stored list.
    run_tests(exe, tests, False)


def run_specific_failed_tests(specified_tests):
    # Re-run only the stored failures whose indices were given on the
    # command line; fall back to all stored failures if none match.
    if not config_path.exists():
        print("Please provide exe using --exe")
        return
    with open(config_path) as f:
        config_data = json.load(f)
    exe = config_data['exe']
    data = config_data['tests']
    filtered_tests = dict()
    for test in specified_tests:
        if test in data.keys():
            filtered_tests[test] = data[test]
    if len(filtered_tests) == 0:
        run_failed_tests(exe, data)
    else:
        run_tests(exe, filtered_tests, False)

def process_tests():
    if exe_flag in sys.argv:
        # --exe <path>: store the executable (with an empty failure list)
        # so later invocations can run it without arguments.
        exe = sys.argv[sys.argv.index(exe_flag) + 1]
        output = dict()
        output['exe'] = exe
        output['tests'] = dict()
        with open(config_path, "w") as f:
            f.write(json.dumps(output))
    elif failing_flag in sys.argv:
        # --run_failing [indices...]: re-run stored failing tests.
        tests = sys.argv[sys.argv.index(failing_flag) + 1:]
        run_specific_failed_tests(tests)
    else:
        # No arguments: run the whole suite of the stored executable.
        run_all_tests()


process_tests()
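# A possible workflow, assuming a gtest binary at ./build/unit_tests
# (hypothetical path):
#
#   ./gtester.py --exe ./build/unit_tests   # register the executable
#   ./gtester.py                            # run all tests; failures are tagged [1], [2], ...
#   ./gtester.py --run_failing 1 3          # re-run only stored failures 1 and 3
#   ./gtester.py --run_failing              # re-run every stored failure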