# training_save_deep_models.py
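#
# Trains the IEGMNet model (models/model_1.py) on IEGM segments loaded through
# IEGM_DataSET, then saves the trained model and the per-epoch loss/accuracy
# logs under ./saved_models/.
#
# Example invocation (a sketch only; <your_data_dir> is a placeholder for a
# local copy of the contest training segments):
#
#   python training_save_deep_models.py --epoch 2 --lr 0.0001 --batchsz 32 \
#       --size 1250 --path_data <your_data_dir> --path_indices ./data_indices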
import argparse
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from help_code_demo import ToTensor, IEGM_DataSET
from models.model_1 import IEGMNet


def main():
    # Hyperparameters
    BATCH_SIZE = args.batchsz
    BATCH_SIZE_TEST = args.batchsz
    LR = args.lr
    EPOCH = args.epoch
    SIZE = args.size
    path_data = args.path_data
    path_indices = args.path_indices

    # Instantiating NN
    net = IEGMNet()
    net.train()
    net = net.float().to(device)

    # Start dataset loading
    trainset = IEGM_DataSET(root_dir=path_data,
                            indice_dir=path_indices,
                            mode='train',
                            size=SIZE,
                            transform=transforms.Compose([ToTensor()]))

    trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)

    testset = IEGM_DataSET(root_dir=path_data,
                           indice_dir=path_indices,
                           mode='test',
                           size=SIZE,
                           transform=transforms.Compose([ToTensor()]))

    testloader = DataLoader(testset, batch_size=BATCH_SIZE_TEST, shuffle=True, num_workers=0)

    print("Training Dataset loading finish.")

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=LR)
    epoch_num = EPOCH

    Train_loss = []
    Train_acc = []
    Test_loss = []
    Test_acc = []

    print("Start training")
    for epoch in range(epoch_num):  # loop over the dataset multiple times (specify the #epoch)

        # Make sure the network is back in training mode; it is switched to
        # eval mode for the test pass at the end of every epoch.
        net.train()

        running_loss = 0.0
        correct = 0.0
        accuracy = 0.0
        i = 0
        for j, data in enumerate(trainloader, 0):
            inputs, labels = data['IEGM_seg'], data['label']
            inputs = inputs.float().to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels).sum()
            # Divide by the actual batch size so the last (possibly smaller)
            # batch does not skew the running accuracy.
            accuracy += correct / labels.size(0)
            correct = 0.0

            running_loss += loss.item()
            i += 1

        print('[Epoch, Batches] is [%d, %5d] \nTrain Acc: %.5f Train loss: %.5f' %
              (epoch + 1, i, accuracy / i, running_loss / i))

        Train_loss.append(running_loss / i)
        Train_acc.append((accuracy / i).item())

        running_loss = 0.0
        accuracy = 0.0

        correct = 0.0
        total = 0.0
        i = 0.0
        running_loss_test = 0.0
        # Evaluate on the test split at the end of the epoch; gradients are
        # not needed here, so the pass runs under torch.no_grad().
        net.eval()
        with torch.no_grad():
            for data_test in testloader:
                IEGM_test, labels_test = data_test['IEGM_seg'], data_test['label']
                IEGM_test = IEGM_test.float().to(device)
                labels_test = labels_test.to(device)
                outputs_test = net(IEGM_test)
                _, predicted_test = torch.max(outputs_test.data, 1)
                total += labels_test.size(0)
                correct += (predicted_test == labels_test).sum()

                loss_test = criterion(outputs_test, labels_test)
                running_loss_test += loss_test.item()
                i += 1

        print('Test Acc: %.5f Test Loss: %.5f' % (correct / total, running_loss_test / i))

        Test_loss.append(running_loss_test / i)
        Test_acc.append((correct / total).item())

    # Save both the full model object and the state dict
    torch.save(net, './saved_models/IEGM_net.pkl')
    torch.save(net.state_dict(), './saved_models/IEGM_net_state_dict.pkl')
    # Write the recorded loss/accuracy curves; the context manager ensures
    # the file is closed properly.
    with open('./saved_models/loss_acc.txt', 'w') as f:
        f.write("Train_loss\n")
        f.write(str(Train_loss))
        f.write('\n\n')
        f.write("Train_acc\n")
        f.write(str(Train_acc))
        f.write('\n\n')
        f.write("Test_loss\n")
        f.write(str(Test_loss))
        f.write('\n\n')
        f.write("Test_acc\n")
        f.write(str(Test_acc))
        f.write('\n\n')

    print('Finish training')
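

# A minimal, hypothetical reload sketch for later inference, assuming the
# architecture in models.model_1.IEGMNet is unchanged since training. This
# helper is illustrative only and is not called anywhere in this script.
def load_trained_model(state_dict_path='./saved_models/IEGM_net_state_dict.pkl',
                       map_device=torch.device('cpu')):
    model = IEGMNet()
    # Load the weights written by main() and move the model to the chosen device
    model.load_state_dict(torch.load(state_dict_path, map_location=map_device))
    model = model.float().to(map_device)
    model.eval()
    return model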


if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--epoch', type=int, help='epoch number', default=2)
    argparser.add_argument('--lr', type=float, help='learning rate', default=0.0001)
    argparser.add_argument('--batchsz', type=int, help='total batchsz for traindb', default=32)
    argparser.add_argument('--cuda', type=int, default=0)
    argparser.add_argument('--size', type=int, default=1250)
    argparser.add_argument('--path_data', type=str, default='H:/Date_Experiment/data_IEGMdb_ICCAD_Contest/segments-R250'
                                                            '-BPF15_55-Noise/tinyml_contest_data_training/')
    argparser.add_argument('--path_indices', type=str, default='./data_indices')

    args = argparser.parse_args()
    # Fall back to CPU when CUDA is not available on this machine
    device = torch.device("cuda:" + str(args.cuda) if torch.cuda.is_available() else "cpu")
    print("device is --------------", device)

    main()