-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy pathinference.py
67 lines (51 loc) · 1.82 KB
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import warnings
import hydra
import torch
from hydra.utils import instantiate
from src.datasets.data_utils import get_dataloaders
from src.trainer import Inferencer
from src.utils.init_utils import set_random_seed
from src.utils.io_utils import ROOT_PATH
warnings.filterwarnings("ignore", category=UserWarning)
@hydra.main(version_base=None, config_path="src/configs", config_name="inference")
def main(config):
    """
    Entry point for inference. Builds the model, metrics, and dataloaders,
    then runs Inferencer to compute metrics and/or save predictions.

    Args:
        config (DictConfig): hydra experiment config.
    """
    set_random_seed(config.inferencer.seed)

    # Resolve the target device; "auto" prefers CUDA when available.
    if config.inferencer.device != "auto":
        device = config.inferencer.device
    else:
        device = "cuda" if torch.cuda.is_available() else "cpu"

    # Dataloaders plus batch-level transforms (the transforms are expected
    # to live on `device`).
    dataloaders, batch_transforms = get_dataloaders(config, device)

    # Instantiate the architecture and echo it for inspection.
    model = instantiate(config.model).to(device)
    print(model)

    metrics = instantiate(config.metrics)

    # Directory where the Inferencer dumps its predictions.
    save_path = ROOT_PATH / "data" / "saved" / config.inferencer.save_path
    save_path.mkdir(exist_ok=True, parents=True)

    inferencer = Inferencer(
        model=model,
        config=config,
        device=device,
        dataloaders=dataloaders,
        batch_transforms=batch_transforms,
        save_path=save_path,
        metrics=metrics,
        skip_model_load=False,
    )

    # Report every metric from every evaluated partition.
    logs = inferencer.run_inference()
    for part, part_logs in logs.items():
        for key, value in part_logs.items():
            full_key = part + "_" + key
            print(f" {full_key:15s}: {value}")
# Standard script guard: run main() only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    main()