
Commit 23d3c9d

multimodal input code modifications for gym pybullet drones data

1 parent: e576af3

145 files changed: +8125 additions, −107 deletions


.gitignore

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
+*.pyc
+
+save/
+log/
+
+tags
+
+# Ignore PyCharm idea files
+.idea
+__pycache__
+
+# caches
+*.h5
+
+# Data
+data/*
+**/model_checkpoints/*
+processed-data/*
+training-histories/*
+model-piloted-runs/*
+console-log/*
+junk/*
+history/*
+histories/*
+logs/*
+airsim/*
+visualbackprop/*
+setup_path.py
+*~
+*.db
+*.tf
+*spec.txt
+cached_data
+
+# pyenv
+.python-version
+
+# keras
+.cache/*
+.keras/*
+
+# vim
+*.swp
+
+aaron*
+*_logs
+tuner_tests/
+*.vscode
+
+# visual backprop results
+*.png
+*.mp4
+
+# tuning results
+*.err
+*.log*
+*.out
+*.pkl
+**/*train_results.json
+
+# dataset jsons
+#dataset_jsons/
+
+# analysis results
+**/lipschitz_out/
+**/ssim_out/
+**/perturb_out/
+**/grid_out/
+**/loss_out/
+
+# shap rep
+shap/

README.md

File mode changed: 100644 → 100755

analysis/grad_cam.py

Lines changed: 12 additions & 3 deletions
@@ -8,6 +8,7 @@
 from tensorflow.python.keras import Model
 from tensorflow.python.keras.layers import Conv2D
 from tensorflow.python.keras.models import Functional
+import numpy as np

 from utils.vis_utils import image_grid
 from utils.model_utils import load_model_from_weights, load_model_no_params, ModelParams
@@ -17,8 +18,14 @@ def compute_gradcam(img: Union[Tensor, ndarray], grad_model: Functional, hiddens
                     pred_index: Optional[Sequence[Tensor]] = None):
     heatmaps, hiddens = _compute_gradcam(img=img, grad_model=grad_model, hiddens=hiddens, pred_index=pred_index)
     avg_heat = tf.math.add_n(heatmaps)
+
+    map_min = np.min(avg_heat)
+    map_max = np.max(avg_heat)
+    avg_heat = (avg_heat - map_min) / (map_max - map_min + 1e-6)
+
     avg_heat = tf.expand_dims(avg_heat, axis=-1)
-    return avg_heat, hiddens
+
+    return avg_heat, hiddens, avg_heat


 def compute_gradcam_tile(img: Union[Tensor, ndarray], grad_model: Functional, hiddens: Sequence[Tensor],
@@ -47,6 +54,7 @@ def _compute_gradcam(img: Union[Tensor, ndarray], grad_model: Functional, hidden
     # Then, we compute the gradient of the top predicted class for our input image
     # with respect to the activations of the last conv layer
     with tf.GradientTape() as tape:
+        img = tf.convert_to_tensor(img, dtype=tf.float32)
         out = grad_model([img, *hiddens])
         last_conv_layer_output = out[0]
         preds = out[1]
@@ -59,7 +67,7 @@ def _compute_gradcam(img: Union[Tensor, ndarray], grad_model: Functional, hidden
     grads = tape.jacobian(preds, last_conv_layer_output)[0]
     last_conv_layer_output = last_conv_layer_output[0]
     for pred in pred_index:
-        # This is the gradient of the output neuron (top pred1icted or chosen)
+        # This is the gradient of the output neuron (top predicted or chosen)
        # with regard to the output feature map of the last conv layer
         grad = grads[pred]
@@ -95,5 +103,6 @@ def get_last_conv(model_path: str, model_params: Optional[ModelParams] = None) -
     # First, we create a model that maps the input image to the activations
     # of the last conv layer as well as the output predictions
     return tf.keras.models.Model(
-        [vis_model.inputs], [conv_layers[-1].output, *vis_model.output]
+        inputs=[vis_model.inputs],
+        outputs=[conv_layers[-1].output, *vis_model.output]
     )
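
Note: the new lines in compute_gradcam min-max normalize the averaged heatmap into [0, 1], with a small epsilon so a constant map does not divide by zero. A standalone sketch of that step (the random array is a stand-in for an averaged Grad-CAM map, not data from this repo):

import numpy as np

heat = np.random.rand(8, 8).astype(np.float32)  # stand-in for an averaged Grad-CAM heatmap
heat = (heat - heat.min()) / (heat.max() - heat.min() + 1e-6)  # epsilon avoids 0/0 on a flat map
print(heat.min(), heat.max())  # values now lie within [0, 1]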

analysis/loss_graph.py

Lines changed: 5 additions & 1 deletion
@@ -10,7 +10,8 @@
 import seaborn as sns

 # noinspection PyArgumentList
-from utils.graph_utils import MARKERS
+# from utils.graph_utils import MARKERS
+MARKERS = ['o', 'v', '^', '<', '>', 's', 'p', '*', 'h', 'H', 'D', 'd']


 # noinspection PyArgumentList
@@ -42,6 +43,8 @@ def graph_loss(train_losses: Optional[Dict[str, Sequence[Tuple[int, float]]]] =
     plt.xlabel("Epoch")
     plt.ylabel("Loss")
     plt.legend(loc="upper right")
+    # make y axis logarithmic:
+    plt.yscale('log')

     plt.autoscale(enable=True, axis='x', tight=True)
@@ -81,6 +84,7 @@ def get_losses_from_checkpoints(checkpoint_paths: Sequence[str], save_dir: str,
     all_train_losses = {}
     all_val_losses = {}
     for i, checkpoint_path in enumerate(checkpoint_paths):
+        print(checkpoint_path)
         match_reg = re.compile(".*(\d\d\d\d:\d\d:\d\d:\d\d:\d\d:\d\d)")
         date_str = match_reg.search(checkpoint_path).group(1)

analysis/visual_backprop.py

Lines changed: 2 additions & 1 deletion
@@ -2,6 +2,8 @@

 import numpy as np
 import tensorflow as tf
+physical_devices = tf.config.list_physical_devices('GPU')
+tf.config.experimental.set_memory_growth(physical_devices[0], True)
 from numpy import ndarray
 from tensorflow import keras, Tensor
 from tensorflow.keras.layers import Conv2D
@@ -32,7 +34,6 @@ def compute_visualbackprop(img: Union[Tensor, ndarray],
         if isinstance(layer, Conv2D):
             kernels.append(layer.kernel_size)
             strides.append(layer.strides)
-
     activations = activation_model.predict(img)
     average_layer_maps = []
     for layer_activation in activations:  # Only the convolutional layers
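
Note: the added memory-growth setup indexes physical_devices[0], which raises an IndexError on a machine with no visible GPU. A defensive variant, sketched here as a suggestion rather than as part of the commit, loops over whatever GPUs are present:

import tensorflow as tf

# Enable memory growth only when a GPU exists; a no-op on CPU-only hosts.
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)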

archive/README.md

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+Folder for storing legacy scripts that are no longer used or are deprecated. If a script in this directory is useful, move it outside.
+
+Scripts used for simulated tasks are also in this folder for now.

archive/analysis.py

Lines changed: 140 additions & 0 deletions
@@ -0,0 +1,140 @@
+import pickle
+import os
+import seaborn as sns
+import matplotlib.pyplot as plt
+import argparse
+import sys
+import numpy as np
+import pandas as pd
+
+REVISIONS = (5,6)
+
+sns.set_theme(style="darkgrid")
+parser = argparse.ArgumentParser(description='Plot training history of deepdrone project')
+parser.add_argument('--history_dir', type=str, default="history", help='The path to the training histories')
+args = parser.parse_args()
+
+enviromentNames = set()
+taskNames = set()
+modelNames = set()
+
+histories = dict()
+for domain in os.listdir(args.history_dir):
+
+    task = domain[:domain.index('-')]
+    enviroment = domain[domain.index('-')+1:]
+
+    if enviroment not in histories:
+        histories[enviroment] = dict()
+        enviromentNames.add(enviroment)
+
+    if task not in histories[enviroment]:
+        histories[enviroment][task] = dict()
+        taskNames.add(task)
+
+
+    for historyFile in os.listdir(args.history_dir + '/' + domain ):
+
+        if any([(("rev-" + str(r)) in historyFile) for r in REVISIONS]):
+            continue
+
+        # map each model type to its training histories
+        model = historyFile[:historyFile.index('-')]
+        if model not in histories[enviroment][task]:
+            histories[enviroment][task][model] = []
+            modelNames.add(model)
+
+        with open(args.history_dir + '/' + domain + '/' + historyFile, 'rb') as fp:
+            print(args.history_dir + '/' + domain + '/' + historyFile)
+            histories[enviroment][task][model].append(pickle.load(fp))
+
+enviromentNames = sorted(list(enviromentNames))
+taskNames = sorted(list(taskNames))
+modelNames = sorted(list(modelNames))
+
+
+for enviroment in enviromentNames:
+    for i, task in enumerate(taskNames):
+        validationFrame = []
+        for j, model in enumerate(modelNames):
+
+            try:
+                loss = [h["loss"] for h in histories[enviroment][task][model]]
+                validationLoss = [h["val_loss"] for h in histories[enviroment][task][model]]
+            except KeyError:
+                print(enviroment, task, model, "data missing")
+                print("")
+                continue
+
+            losses = np.array(loss)
+            validations = np.array(validationLoss)
+
+            for epoche, replicationSet in enumerate(validations.T):
+                for v in replicationSet:
+                    validationFrame.append([epoche, model, v])
+
+
+
+        validationFrame = pd.DataFrame(validationFrame, columns=["epoche", "model", "loss"])
+
+        sns.lineplot(data=validationFrame, x="epoche", y="loss", hue="model")
+        plt.title(enviroment + " " + task)
+        plt.show()
+
+
+
+# for enviroment in enviromentNames:
+#     fig, axes = plt.subplots(len(taskNames), len(modelNames))
+#     axes = np.reshape(axes, (len(taskNames), len(modelNames)))
+#     for i, task in enumerate(taskNames):
+#         for j, model in enumerate(modelNames):
+#
+#             if j == 0:
+#                 axes[i, 0].set_ylabel(task)
+#
+#             if i == 0:
+#                 axes[0, j].set_title(model)
+#
+#             try:
+#                 loss = [h["loss"] for h in histories[enviroment][task][model]]
+#                 validationLoss = [h["val_loss"] for h in histories[enviroment][task][model]]
+#             except KeyError:
+#                 continue
+#
+#             losses = np.array(loss)
+#             validations = np.array(validationLoss)
+#
+#             lossStdDev = np.std(losses, axis=0)
+#             validationStdDev = np.std(validations, axis=0)
+#
+#             lossMean = np.mean(losses, axis=0)
+#             validationMean = np.mean(validations, axis=0)
+#
+#             print(enviroment, task, model)
+#             minIdx = np.argmin(validationMean)
+#             print(f"{validationMean[minIdx]:.3} +/- {validationStdDev[minIdx]:.3}")
+#
+#             # for l in losses:
+#             #     axes[i, j].plot(range(len(l)), l, "r", label="losses")
+#
+#             for v in validations:
+#                 logV = [np.log(1 + 100000000 * np.abs(v_i)) for v_i in v]
+#                 axes[i, j].plot(range(len(v)), logV, "b", label="validations")
+#
+#             # axes[i, j].errorbar(range(len(lossMean)), lossMean, yerr=lossStdDev, label="Training Loss")
+#             # axes[i, j].errorbar(range(len(validationMean)), validationMean, yerr=validationStdDev, label="Validation Loss")
+#             # axes[i, j].legend()
+#
+#             # print(enviroment, task, model)
+#             # print(loss.shape)
+#             # print(loss)
+#
+#             # print(lossStdDev.shape)
+#             # print(lossStdDev)
+#
+#             # sys.exit()
+#
+#     print("")
+#
+# fig.suptitle(f"{enviroment} enviroment learning curves")
+# plt.show()
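
Note: the script expects --history_dir to contain task-enviroment subdirectories of pickled Keras-style history dicts, with the model name taken from before the first '-' in each filename. A minimal compatible fixture, with hypothetical names and values:

import os
import pickle

# Directory name is split on the first '-': task "following", enviroment "neighborhood".
os.makedirs("history/following-neighborhood", exist_ok=True)
history = {"loss": [0.9, 0.5, 0.3], "val_loss": [1.0, 0.7, 0.5]}  # one value per epoch
# Filename prefix "cnn" becomes the model name; "rev-5"/"rev-6" files would be skipped.
with open("history/following-neighborhood/cnn-rev-1.p", "wb") as fp:
    pickle.dump(history, fp)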

archive/compress-data.py

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+import os
+import shutil
+import numpy as np
+
+DATASET_PATH = 'C:\\Users\\MIT Driverless\\Documents\\AirSim\\following-neighborhood-parsed'
+TRUNCATED_PATH = DATASET_PATH + '-truncated'
+
+if not os.path.exists(TRUNCATED_PATH):
+    os.makedirs(TRUNCATED_PATH)
+
+for dataset in os.listdir(DATASET_PATH):
+    print(dataset)
+    data_files = set(os.listdir(DATASET_PATH + '\\' + dataset))
+    for f in data_files:
+        data = np.load(DATASET_PATH + '\\' + dataset + '\\' + f)
+        uint8_data = (255*data).astype(np.uint8)
+        if not os.path.exists(TRUNCATED_PATH + '\\' + dataset):
+            os.makedirs(TRUNCATED_PATH + '\\' + dataset)
+        np.save(TRUNCATED_PATH + '\\' + dataset + '\\' + f, uint8_data)
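
Note: this script quantizes float arrays in [0, 1] to uint8, cutting storage by 4x from float32 (8x from float64) at the cost of 8-bit precision. A consumer would reverse the scaling on load; a sketch under that assumption (the path is hypothetical):

import numpy as np

arr = np.load('following-neighborhood-parsed-truncated\\run0\\images.npy')  # hypothetical file
frames = arr.astype(np.float32) / 255.0  # back to [0, 1] floats, up to quantization error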

archive/csvify.py

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
+import pickle
+import os
+import matplotlib.pyplot as plt
+import argparse
+import sys
+
+import numpy as np
+
+parser = argparse.ArgumentParser(description='Plot training history of deepdrone project')
+parser.add_argument('--history_dir', type=str, default="history", help='The path to the training histories')
+args = parser.parse_args()
+
+enviromentNames = set()
+taskNames = set()
+modelNames = set()
+
+histories = dict()
+for domain in os.listdir(args.history_dir):
+
+    task = domain[:domain.index('-')]
+    enviroment = domain[domain.index('-')+1:]
+
+    if enviroment not in histories:
+        histories[enviroment] = dict()
+        enviromentNames.add(enviroment)
+
+    if task not in histories[enviroment]:
+        histories[enviroment][task] = dict()
+        taskNames.add(task)
+
+
+    for i, historyFile in enumerate(os.listdir(args.history_dir + '/' + domain)):
+
+        # map each model type to its training histories
+        model = historyFile[:historyFile.index('-')]
+        if model not in histories[enviroment][task]:
+            histories[enviroment][task][model] = []
+            modelNames.add(model)
+
+        with open(args.history_dir + '/' + domain + '/' + historyFile, 'rb') as fp:
+            history = pickle.load(fp)
+            histories[enviroment][task][model].append(history)
+
+        np.savetxt(f"csv/{enviroment}-{task}-{model}-training-loss-{len(histories[enviroment][task][model])}", history["loss"], delimiter=",")
+        np.savetxt(f"csv/{enviroment}-{task}-{model}-validation-loss-{len(histories[enviroment][task][model])}", history["val_loss"], delimiter=",")
