-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathrun_experiment.py
312 lines (259 loc) · 12.5 KB
/
run_experiment.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
import json
import pickle
from argparse import ArgumentParser
from logging import getLogger
from pathlib import Path
from typing import Union
from jenga.utils import BINARY_CLASSIFICATION, MULTI_CLASS_CLASSIFICATION, REGRESSION
from conformal_data_cleaning import setup_logger
from conformal_data_cleaning.cleaner.conformal import ConformalAutoGluonCleaner
from conformal_data_cleaning.cleaner.machine_learning import AutoGluonCleaner
from conformal_data_cleaning.cleaner.pyod import PyodECODCleaner
from conformal_data_cleaning.config import error_types, method_hyperparameters
from conformal_data_cleaning.jenga_extension import get_OpenMLTask
from conformal_data_cleaning.utils import set_seed
from .hyperparameters import hyperparameters
# Module-level experiment setup: configure the shared "main" logger once and
# fix the random seed so repeated runs are reproducible.
setup_logger("main")
logger = getLogger("main")
# Do not bubble records up to the root logger (avoids duplicate log lines).
logger.propagate = False
set_seed(42)
def start_baseline(
    task_id: int,
    error_fractions: list[float],
    num_repetitions: int,
    method: str,
    method_hyperparameter: float,
    how_many_hpo_trials: int,
    results_path: Path,
    models_path: Path,
) -> None:
    """Run the non-conformal baseline cleaning experiment for one OpenML task.

    For each repetition: fetch the task, fit the downstream baseline model on
    clean data, fit the requested baseline cleaner ("PyodECOD" or "AutoGluon"),
    then — for every (error fraction, error type) combination — measure
    downstream performance on the corrupted test data and on the cleaned test
    data. Results land under
    ``results_path / task_id / method / method_hyperparameter``; a
    ``finished.txt`` marker is touched at the end.

    Args:
        task_id: OpenML task identifier.
        error_fractions: Fractions of test cells to corrupt.
        num_repetitions: Number of full experiment repetitions.
        method: Cleaning method; must be a key of ``method_hyperparameters``
            and one of "PyodECOD" or "AutoGluon".
        method_hyperparameter: Method-specific hyperparameter (e.g. the ECOD
            contamination level).
        how_many_hpo_trials: Number of AutoGluon HPO trials per column model.
        results_path: Root directory for result JSON/CSV files.
        models_path: Root directory for persisted models and leaderboards.

    Raises:
        ValueError: If ``method`` is unknown or unsupported.
    """
    logger.info("Starting baseline with arguments ...")
    logger.info(f"\t--task_id={task_id}")
    logger.info(f"\t--error_fractions={error_fractions}")
    logger.info(f"\t--num_repetitions={num_repetitions}")
    logger.info(f"\t--method={method}")
    logger.info(f"\t--method_hyperparameter={method_hyperparameter}")
    logger.info(f"\t--how_many_hpo_trials={how_many_hpo_trials}")
    logger.info(f"\t--results_path={results_path}")
    logger.info(f"\t--models_path={models_path}")

    # Fail fast on unknown methods. Fixed: join the alternatives with ", " so
    # the message lists readable names instead of concatenating them.
    if method not in method_hyperparameters:
        raise ValueError(f"'method' needs to be one of: {', '.join(method_hyperparameters)}")

    for repetition in range(num_repetitions):
        logger.info(f"Start repetition #{repetition + 1} of {num_repetitions}")

        # Get task data and fit the downstream baseline model on clean data.
        original_task = get_OpenMLTask(task_id)
        original_task.fit_baseline_model()
        original_perf = original_task.calculate_performance(original_task.test_data)
        save_downstream_performance(
            original_perf,
            task_type=original_task._task_type,
            file=results_path
            / str(task_id)
            / method
            / str(method_hyperparameter)
            / str(repetition)
            / "original_perf.json",
        )

        # Fit the cleaning model that will clean future (potentially dirty) data.
        if method == "PyodECOD":
            baseline_cleaner: Union[PyodECODCleaner, AutoGluonCleaner] = PyodECODCleaner(
                contamination=method_hyperparameter,
            )
            baseline_cleaner.fit(original_task.train_data)
        elif method == "AutoGluon":
            # Prepare the path prefix under which per-column models are stored.
            models_path_prefix = models_path / str(task_id) / method / str(method_hyperparameter) / str(repetition)
            predictor_params = {"path_prefix": models_path_prefix}
            baseline_cleaner = AutoGluonCleaner(
                method_hyperparameter,
                method_hyperparameter,
            )
            baseline_cleaner.fit(
                original_task.train_data,
                fit_params={
                    "verbosity": 1,
                    "hyperparameters": hyperparameters,
                    "hyperparameter_tune_kwargs": {"num_trials": how_many_hpo_trials},
                    "ag_args_fit": {"num_cpus": 24},
                },
                predictor_params=predictor_params,
            )
            # Save leaderboards, which contain information about training times.
            for column, predictor in baseline_cleaner.predictors_.items():
                leaderboard_file_path = f"{models_path_prefix / column}.csv"
                predictor.leaderboard(extra_info=True, silent=True).to_csv(leaderboard_file_path, index=False)
        else:
            # Fixed: previously this branch only logged a warning and the code
            # then crashed later with a NameError because `baseline_cleaner`
            # was never bound. Raise a clear error instead.
            raise ValueError(f"Unsupported cleaning method: {method}")

        # Iterate over every corruption scenario applied to the test data.
        for fraction in error_fractions:
            for corruption in error_types:
                # Downstream performance on corrupted (uncleaned) data.
                corrupted_task = get_OpenMLTask(task_id=task_id, corruption=corruption, fraction=fraction)
                corrupted_perf = original_task.calculate_performance(corrupted_task.test_data)
                directory_for_results = (
                    results_path
                    / str(task_id)
                    / method
                    / str(method_hyperparameter)
                    / str(repetition)
                    / corruption
                    / str(fraction)
                )
                directory_for_results.mkdir(parents=True, exist_ok=True)
                save_downstream_performance(
                    corrupted_perf,
                    task_type=corrupted_task._task_type,
                    file=directory_for_results / "corrupted_perf.json",
                )

                # Downstream performance after cleaning the corrupted data.
                cleaned_data, cleaned_mask, _ = baseline_cleaner.transform(
                    corrupted_task.test_data,
                )
                cleaned_perf = original_task.calculate_performance(cleaned_data)
                save_downstream_performance(
                    cleaned_perf,
                    task_type=corrupted_task._task_type,
                    file=directory_for_results / "cleaned_perf.json",
                )

                # Save cleaned data and the mask of cells the cleaner changed.
                cleaned_data.to_csv(directory_for_results / "cleaned_data.csv", index=False)
                cleaned_mask.to_csv(directory_for_results / "cleaned_mask.csv", index=False)

        # Cleanup before the next repetition to release memory early.
        del baseline_cleaner, original_task, corrupted_task

    # Indicate that this experiment has finished (used to skip reruns).
    (results_path / str(task_id) / method / str(method_hyperparameter) / "finished.txt").touch()
def start_experiment(
    task_id: int,
    confidence_level: float,
    error_fractions: list[float],
    num_repetitions: int,
    how_many_hpo_trials: int,
    results_path: Path,
    models_path: Path,
) -> None:
    """Run the conformal cleaning experiment ("ConformalAutoGluon") for one OpenML task.

    For each repetition: fetch the task, fit the downstream baseline model on
    clean data, fit a ConformalAutoGluonCleaner, then — for every
    (error fraction, error type) combination — measure downstream performance
    on corrupted and on cleaned test data, saving prediction sets, cleaned
    data, and masks. Results land under
    ``results_path / task_id / "ConformalAutoGluon" / confidence_level``; a
    ``finished.txt`` marker is touched at the end.

    Args:
        task_id: OpenML task identifier.
        confidence_level: Confidence level for the conformal cleaner.
        error_fractions: Fractions of test cells to corrupt.
        num_repetitions: Number of full experiment repetitions.
        how_many_hpo_trials: Number of AutoGluon HPO trials per column model.
        results_path: Root directory for result JSON/CSV/pickle files.
        models_path: Root directory for persisted models and leaderboards.
    """
    logger.info("Starting experiment with arguments ...")
    logger.info(f"\t--task_id={task_id}")
    logger.info(f"\t--confidence_level={confidence_level}")
    logger.info(f"\t--error_fractions={error_fractions}")
    logger.info(f"\t--num_repetitions={num_repetitions}")
    logger.info(f"\t--how_many_hpo_trials={how_many_hpo_trials}")
    logger.info(f"\t--results_path={results_path}")
    logger.info(f"\t--models_path={models_path}")
    for repetition in range(num_repetitions):
        logger.info(f"Start repetition #{repetition + 1} of {num_repetitions}")
        # get task data and fit baseline model
        original_task = get_OpenMLTask(task_id)
        original_task.fit_baseline_model()
        original_perf = original_task.calculate_performance(original_task.test_data)
        save_downstream_performance(
            original_perf,
            task_type=original_task._task_type,
            file=results_path
            / str(task_id)
            / "ConformalAutoGluon"
            / str(confidence_level)
            / str(repetition)
            / "original_perf.json",
        )
        # prepare path prefix for models
        models_path_prefix = models_path / str(task_id) / "ConformalAutoGluon" / str(confidence_level) / str(repetition)
        ci_ag_predictor_params = {"path_prefix": models_path_prefix}
        # fit cleaning model to clean future incoming potentially dirty data
        cleaner = ConformalAutoGluonCleaner(
            confidence_level=confidence_level,
        )
        cleaner.fit(
            original_task.train_data,
            ci_ag_fit_params={
                "verbosity": 1,
                "hyperparameters": hyperparameters,
                "hyperparameter_tune_kwargs": {"num_trials": how_many_hpo_trials},
                "ag_args_fit": {"num_cpus": 24},
            },
            ci_ag_predictor_params=ci_ag_predictor_params,
        )
        # save leaderboards which contains information about training times
        # (the conformal cleaner wraps the AutoGluon predictor, hence `_predictor`)
        for column, predictor in cleaner.predictors_.items():
            leaderboard_file_path = f"{models_path_prefix / column}.csv"
            predictor._predictor.leaderboard(extra_info=True, silent=True).to_csv(leaderboard_file_path, index=False)
        # iterate over corrupted test data
        for fraction in error_fractions:
            for corruption in error_types:
                # downstream performance on corrupted data
                corrupted_task = get_OpenMLTask(task_id=task_id, corruption=corruption, fraction=fraction)
                corrupted_perf = original_task.calculate_performance(corrupted_task.test_data)
                directory_for_results = (
                    results_path
                    / str(task_id)
                    / "ConformalAutoGluon"
                    / str(confidence_level)
                    / str(repetition)
                    / corruption
                    / str(fraction)
                )
                directory_for_results.mkdir(parents=True, exist_ok=True)
                save_downstream_performance(
                    corrupted_perf,
                    task_type=corrupted_task._task_type,
                    file=directory_for_results / "corrupted_perf.json",
                )
                # downstream performance on cleaned data
                cleaned_data, cleaned_mask, prediction_sets = cleaner.transform(
                    corrupted_task.test_data,
                    empty_pred_sets_are_inliers=True,  # type: ignore
                )
                # persist the conformal prediction sets for later analysis
                with open(directory_for_results / "prediction_sets.pckl", "wb") as file:
                    pickle.dump(prediction_sets, file)
                cleaned_perf = original_task.calculate_performance(cleaned_data)
                save_downstream_performance(
                    cleaned_perf,
                    task_type=corrupted_task._task_type,
                    file=directory_for_results / "cleaned_perf.json",
                )
                # save cleaned data
                cleaned_data.to_csv(directory_for_results / "cleaned_data.csv", index=False)
                cleaned_mask.to_csv(directory_for_results / "cleaned_mask.csv", index=False)
        # cleanup before next iteration
        del cleaner, original_task, corrupted_task
    # indicate that this experiment is already finished
    (results_path / str(task_id) / "ConformalAutoGluon" / str(confidence_level) / "finished.txt").touch()
def save_downstream_performance(downstream_metrics: tuple[float, float, float], task_type: int, file: Path) -> None:
    """Persist the three downstream metrics as a JSON file.

    The metric names depend on the task type: F1 variants for classification
    tasks, error measures for regression tasks. The values are written in the
    order produced upstream by ``calculate_performance``.

    Args:
        downstream_metrics: The three metric values (micro/macro/weighted F1
            for classification; MAE/MSE/RMSE for regression).
        task_type: A jenga task-type constant (binary/multi-class
            classification or regression).
        file: Destination path; missing parent directories are created.

    Raises:
        ValueError: If ``task_type`` is not a known task-type constant.
            (Previously an unknown value caused a confusing
            ``UnboundLocalError`` on the metric names.)
    """
    if task_type in (BINARY_CLASSIFICATION, MULTI_CLASS_CLASSIFICATION):
        downstream_metric_names = ("F1_micro", "F1_macro", "F1_weighted")
    elif task_type == REGRESSION:
        downstream_metric_names = ("MAE", "MSE", "RMSE")
    else:
        raise ValueError(f"Unknown task_type: {task_type}")
    file.parent.mkdir(parents=True, exist_ok=True)
    logger.info(f"Saving results at: {file}")
    # Pair names with values positionally and serialize as a flat JSON object.
    file.write_text(json.dumps(dict(zip(downstream_metric_names, downstream_metrics))))
def start() -> None:
    """CLI implementation of the `run-experiment` command.

    Parses shared options plus one of two subcommands — ``baseline`` (extra
    options: ``--method``, ``--method_hyperparameter``) or ``experiment``
    (extra option: ``--confidence_level``) — and dispatches to
    :func:`start_baseline` or :func:`start_experiment` with all parsed options
    forwarded as keyword arguments.
    """
    parser = ArgumentParser(description="CLI to start a data-cleaning experiment.")
    # Options shared by both subcommands.
    parser.add_argument("--task_id", required=True, type=int)
    parser.add_argument("--error_fractions", required=True, nargs="+", type=float)
    parser.add_argument("--num_repetitions", required=True, type=int)
    parser.add_argument("--results_path", type=Path, required=True)
    parser.add_argument("--models_path", type=Path, required=True)
    parser.add_argument("--how_many_hpo_trials", required=True, type=int)
    # Fixed: make the subcommand mandatory so invoking the CLI without one
    # yields a clear argparse error instead of an AttributeError when
    # `args.command_function` is accessed below.
    subparsers = parser.add_subparsers(dest="command", required=True)
    # baseline
    baseline_parser = subparsers.add_parser("baseline")
    baseline_parser.set_defaults(command_function=start_baseline)
    baseline_parser.add_argument("--method", required=True, type=str)
    baseline_parser.add_argument("--method_hyperparameter", required=True, type=float)
    # experiment
    experiment_parser = subparsers.add_parser("experiment")
    experiment_parser.set_defaults(command_function=start_experiment)
    experiment_parser.add_argument("--confidence_level", required=True, type=float)
    # RUN!
    args = parser.parse_args()
    # Forward every parsed option except the dispatch bookkeeping entries.
    args.command_function(
        **{parameter: value for parameter, value in vars(args).items() if parameter not in ("command_function", "command")},
    )