#!/usr/bin/env python3
"""
Created on Mon 09 Nov 2020 | 5:25 PM

@author: Ahmed Majuid

Usage: Define the network architecture and training hyperparameters
"""

# standard and third-party libraries
import os
import tensorflow as tf

# local modules
import training
import utils
import postprocessing
from preprocessing.create_dataset import create_dataset

# Session Parameters
trial_number = 10

session_mode = ["Fresh", "Resume", "Evaluate", "Override"]
mode_id = 0
gpu_name = ["/GPU:0", "/GPU:1", None]
gpu_id = 0

create_new_dataset = 1    # 0: No, 1: Yes

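# Note: mode_id and gpu_id index into the session_mode and gpu_name lists above,
# so the defaults start a "Fresh" training session on "/GPU:0".
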
# Network Hyperparameters
batch_size = int(3 * 1024)
learning_rate = 0.005
dropout = 0.0
epochs = 150
initial_epoch = 0
window_size = 50

# Network Architecture
model_architecture = [
    tf.keras.layers.LSTM(100, return_sequences=True),
    tf.keras.layers.LSTM(100, return_sequences=True),
    tf.keras.layers.LSTM(100, return_sequences=True),
    tf.keras.layers.LSTM(100, return_sequences=False),
    tf.keras.layers.Dense(6)
]

n_features = 10
n_labels = 6

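# Shape note (assuming create_dataset yields windows of shape (window_size, n_features)):
# the three return_sequences=True LSTMs pass full sequences forward, the final LSTM
# keeps only its last output, and Dense(6) maps it to the n_labels predicted states.
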
# Save the hyperparameters in a dictionary
session_data = {"trial_number": trial_number,
                "session_mode": session_mode[mode_id],
                "gpu_name": gpu_name[gpu_id],
                "learning_rate": learning_rate,
                "window_size": window_size,
                "dropout": dropout,
                "batch_size": batch_size,
                "epochs": epochs,
                "initial_epoch": initial_epoch,

                "n_features": n_features,
                "n_labels": n_labels,
                }

# create folders for the training outputs (weights, plots, loss history)
trial_tree = utils.create_trial_tree(session_data["trial_number"], session_data["session_mode"])

if create_new_dataset:
    session_data["dataset_name"] = None
else:
    session_data["dataset_name"] = "T022_7logs_F11L10_W100_11Nov2020_1652"

# create windowed datasets from the flight csv files (or retrieve an old one from binary files)
train_ds, val_dataset, train_flights_dict, val_flights_dict, signals_weights = create_dataset(session_data)

# batch and shuffle
train_dataset = train_ds.batch(batch_size).shuffle(buffer_size=1000)
val_dataset = val_dataset.batch(batch_size).shuffle(buffer_size=1000)

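# Note: shuffling after batching reorders whole batches; the windows inside each
# batch keep their original order.
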
# convert signals weights to a tensor to be used by the loss function
signals_weights_tensor = tf.constant(signals_weights, dtype=tf.float32)

# start training
model = training.start_training(session_data, model_architecture, train_dataset, val_dataset,
                                signals_weights_tensor, trial_tree)

# for every flight, plot all states (truth vs predictions)
postprocessing.evaluate_all_flights(model, train_flights_dict, val_flights_dict,
                                    trial_tree["trial_root_folder"], n_extreme_flights=10)

# add the network configuration and performance to the summary csv
postprocessing.summarize_session(trial_tree, model, session_data)
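
# A minimal, hypothetical usage sketch (assuming the datasets yield (window, label)
# pairs and start_training returns a Keras model): predictions for one validation
# batch could be inspected with
#     for windows, labels in val_dataset.take(1):
#         predictions = model(windows)    # expected shape: (batch_size, n_labels)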