-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrain.py
68 lines (50 loc) · 2.23 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
from datasets import FaceKeyPointsDatasets
import paddle
from paddle.static import InputSpec
from net.backbone import FaceKeyPointsNetBody
from utils.metric import NME
import paddle.nn as nn
from utils.callback import LossHistory, ModelCheckpoint
from utils.optimizer import create_optimzer
import pandas as pd
import warnings
# NOTE(review): this silences ALL warnings, including deprecations that may
# matter on paddle upgrades — deliberate for clean logs, but worth revisiting.
warnings.filterwarnings('ignore')
# Use the GPU when this paddle build supports CUDA; fall back to CPU so the
# script still runs on CPU-only hosts instead of raising at startup.
paddle.set_device('gpu' if paddle.is_compiled_with_cuda() else 'cpu')
# ------------------------------------------------------------------ #
# Hyper-parameters
# batch_size  -> mini-batch size
# backbone    -> one of 'mobilenetv1', 'mobilenetv2', 'resnet50'
# epochs      -> number of training epochs
# model_path  -> path to pretrained weights; leave '' to use the
#                auto-downloaded pretrained backbone weights
# input_shape -> input image size [H, W]
batch_size = 16
backbone = 'mobilenetv2'
epochs = 100
model_path = ''
input_shape = [224, 224]
if __name__ == '__main__':
    # Normalization statistics are computed from the TRAINING keypoints only
    # and reused for the validation set, so both share a single scale.
    df = pd.read_csv('./datasets/training_frames_keypoints.csv')
    labels = df.values[:, 1:]  # drop the image-name column, keep coordinates
    data_mean = labels.mean()
    data_std = labels.std()

    train_datasets = FaceKeyPointsDatasets(
        './datasets/training_frames_keypoints.csv', './datasets/training',
        data_mean, data_std)
    valid_datasets = FaceKeyPointsDatasets(
        './datasets/test_frames_keypoints.csv', './datasets/test',
        data_mean, data_std)
    # Steps per epoch, used by the LR schedule built in create_optimzer.
    step_each_epoch = len(train_datasets) // batch_size

    # 68 facial landmarks.
    # NOTE(review): InputSpec shape has no batch dimension ([3, H, W]);
    # paddle.Model examples usually use [None, 3, H, W] — confirm this is
    # intentional and works with the export/summary paths used downstream.
    model = paddle.Model(
        FaceKeyPointsNetBody(68, backbone=backbone),
        inputs=[InputSpec(shape=[3, input_shape[0], input_shape[1]],
                          dtype='float32', name='image')])
    if model_path:  # idiomatic truthiness check instead of != ''
        model.load(model_path)
        print('导入模型成功!!!')

    loss = nn.SmoothL1Loss()
    metric = NME()  # Normalized Mean Error, standard landmark metric
    model.prepare(create_optimzer(model.parameters(), step_each_epoch, epochs),
                  loss=loss, metrics=metric)

    # Callbacks: VisualDL logging, early stopping on the eval metric,
    # per-epoch LR stepping, loss-curve dumping, and checkpointing.
    visualdl = paddle.callbacks.VisualDL(log_dir='./logs1')
    EarlyStopping = paddle.callbacks.EarlyStopping(save_best_model=False,
                                                   patience=15)
    LRScheduler = paddle.callbacks.LRScheduler(by_epoch=True, by_step=False)
    loss_history = LossHistory('./metric')
    modelcheckpoint = ModelCheckpoint(save_dir='./logs')

    model.fit(train_datasets,
              valid_datasets,
              epochs=epochs,
              batch_size=batch_size,
              verbose=1,
              callbacks=[visualdl, EarlyStopping, modelcheckpoint,
                         LRScheduler, loss_history])