---
dataset_dir: inputs/dataset # dataset location
label_from: directory # other options: "file", "dataframe"
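# With label_from: directory, each class is expected in its own subfolder of
# dataset_dir; a hypothetical layout (class and file names are illustrative):
#   inputs/dataset/
#     class_a/img_001.jpg
#     class_b/img_002.jpg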
# The built-in Keras flow_from_* loaders don't provide much flexibility
# and are pretty slow, so a custom input pipeline is used instead.
data_preparation: custom
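# For reference, a minimal sketch of the built-in loader this option avoids
# (illustrative only; argument values mirror settings elsewhere in this file):
#   from tensorflow.keras.preprocessing.image import ImageDataGenerator
#   gen = ImageDataGenerator(rescale=1/255., validation_split=.3)
#   train = gen.flow_from_directory("inputs/dataset", target_size=(64, 64),
#                                   batch_size=32, subset="training")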
# Dataset is cached in memory, or on disk + memory.
# Most of the time in an epoch is wasted reading files from disk;
# if the data is already in the cache, each subsequent epoch is much faster.
# If the dataset is bigger than your memory, use disk.
data_set_cache: memory # "memory", "disk"
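# A minimal sketch of how this could map onto tf.data, assuming the custom
# pipeline is tf.data-based (the cache file path is illustrative):
#   ds = ds.cache()                      # data_set_cache: memory
#   ds = ds.cache("output/train.cache")  # data_set_cache: disk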
# Directory where all the model outputs will be saved
output_dir: output
# How much log information we want
log_level: info # "info", "debug", "warning", "error"
log_to: console # "console", "file"
# Fraction of the dataset that will be used for validation
validation_rate: .3 # float in [0, 1]
# Shape of the input image
input_shape: [64, 64, 3] # first two elements are the image size and the last one is the number of channels
# Number of output classes to predict
num_output_class: 6 # int
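# A hedged sketch of the Keras graph these two options would drive
# ("..." stands for whatever backbone is used; names are illustrative):
#   inputs = tf.keras.Input(shape=(64, 64, 3))  # input_shape
#   x = ...                                     # backbone goes here
#   outputs = tf.keras.layers.Dense(6)(x)       # num_output_class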
# All the callbacks for our training
callbacks: # options: tensorboard, checkpoint, reduce_lr_plateau; see the sketch after this list
#- tensorboard
- checkpoint
#- custom
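# A hedged sketch of how each callback name might map onto tf.keras
# (paths and parameter values are illustrative, not the project's actual ones):
#   tf.keras.callbacks.TensorBoard(log_dir="output/logs")                  # tensorboard
#   tf.keras.callbacks.ModelCheckpoint("output/ckpt", save_best_only=True) # checkpoint
#   tf.keras.callbacks.ReduceLROnPlateau(factor=.1, patience=3)            # reduce_lr_plateau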
transfer_learning: # find links to all the pretrained models at https://tfhub.dev/
  hub_link: "https://tfhub.dev/google/imagenet/resnet_v1_101/classification/4"
  do_fine_tuning: False
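# Roughly how these two settings would be consumed, assuming the standard
# tensorflow_hub API (the variable name is illustrative):
#   import tensorflow_hub as hub
#   backbone = hub.KerasLayer(hub_link, trainable=do_fine_tuning)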
# Values to search over. For now an exhaustive grid search is used (see the size estimate after this block).
hparam_tuning:
  learning_rate:
    - .1
    - .01
    - .001
  dropout:
    - .1
    - .2
    - .3
  optimizer:
    - adam
    - sgd
  epochs:
    - 2
  activation:
    - relu
  loss:
    - sparse_categorical_crossentropy
  batch_size:
    - 16
    - 32
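# Grid size for the values above: 3 learning rates x 3 dropouts x 2 optimizers
# x 1 epochs x 1 activation x 1 loss x 2 batch sizes = 36 training runs.
# A minimal sketch of enumerating the grid (names are hypothetical):
#   import itertools
#   for lr, drop, opt, bs in itertools.product(learning_rates, dropouts, optimizers, batch_sizes):
#       run_training(lr, drop, opt, bs)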
# Hyperparameters that will be used for training
hparam:
  activation: relu
  learning_rate: .001
  dropout: .3
  optimizer: adam
  epochs: 3
  loss: sparse_categorical_crossentropy
  batch_size: 32
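# How this block would typically reach Keras (a sketch; train_ds / val_ds and
# the batching step are assumptions about the custom pipeline):
#   model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
#                 loss="sparse_categorical_crossentropy",
#                 metrics=["accuracy"])
#   model.fit(train_ds.batch(32), validation_data=val_ds.batch(32), epochs=3)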