TYY_callbacks.py
import keras
from sklearn.metrics import roc_auc_score
import sys
import matplotlib.pyplot as plt
from keras.models import Model
import numpy as np
from keras import backend as K
class FineTuning(keras.callbacks.Callback):
    """Freezes the model layer at index 1 once training reaches `startEpoch`."""
    def __init__(self, startEpoch):
        super(FineTuning, self).__init__()
        self.startEpoch = startEpoch

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        if epoch == self.startEpoch:
            # Freeze the layer at index 1 when the fine-tuning epoch is reached.
            # Note: in many Keras versions, changing `trainable` after compile()
            # only takes effect once the model is compiled again.
            self.model.layers[1].trainable = False
            #LR = K.get_value(self.model.optimizer.lr)
            #K.set_value(self.model.optimizer.lr,LR*0.1)
        return

    def on_epoch_end(self, epoch, logs={}):
        return

    def on_batch_begin(self, batch, logs={}):
        return

    def on_batch_end(self, batch, logs={}):
        return
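
# Usage sketch for FineTuning (hypothetical names: `model`, `x_train`, `y_train`;
# assumes the model has already been compiled):
#
#   model.fit(x_train, y_train, epochs=20,
#             callbacks=[FineTuning(startEpoch=5)])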
class DecayLearningRate(keras.callbacks.Callback):
    """Multiplies the learning rate by 0.1 at each of the two epochs listed in `startEpoch`."""
    def __init__(self, startEpoch):
        super(DecayLearningRate, self).__init__()
        self.startEpoch = startEpoch  # list/tuple of two epoch indices

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        if epoch == self.startEpoch[0] or epoch == self.startEpoch[1]:
            # Scale the optimizer's learning rate down by a factor of 10.
            LR = K.get_value(self.model.optimizer.lr)
            K.set_value(self.model.optimizer.lr, LR * 0.1)
        return

    def on_epoch_end(self, epoch, logs={}):
        return

    def on_batch_begin(self, batch, logs={}):
        return

    def on_batch_end(self, batch, logs={}):
        return
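
# Minimal self-contained sketch (not part of the original file) showing both
# callbacks attached to a toy Keras model trained on random data. The model,
# data, and hyper-parameters below are illustrative assumptions only.
if __name__ == "__main__":
    from keras.models import Sequential
    from keras.layers import Dense

    # Toy data: 256 samples, 16 features, binary labels.
    x = np.random.rand(256, 16)
    y = np.random.randint(0, 2, size=(256, 1))

    # Tiny model so the example runs quickly.
    model = Sequential([
        Dense(32, activation='relu', input_shape=(16,)),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy')

    # Freeze layer index 1 at epoch 3, and decay the learning rate
    # by 10x at epochs 5 and 8 (epoch indices are 0-based).
    callbacks = [FineTuning(3), DecayLearningRate([5, 8])]
    model.fit(x, y, epochs=10, batch_size=32, callbacks=callbacks)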