
Commit 15a9de9

Clean up code for project submission.
1 parent b427346 commit 15a9de9

14 files changed: +124 −303 lines changed

configs/s3dis/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 configs.train = Config()
 configs.train.restart_training = False
 configs.train.num_epochs = 50
-configs.train.batch_size = 32  # Original: 32
+configs.train.batch_size = 32
 configs.train.loss_fn = Config(tf.keras.losses.CategoricalCrossentropy)
 configs.train.loss_fn.axis = 1
 configs.train.loss_fn.reduction = tf.keras.losses.Reduction.SUM

configs/s3dis/pvcnn/__init__.py

Lines changed: 4 additions & 9 deletions
@@ -1,7 +1,6 @@
 """Experiment configurations for S3DIS PVCNN model."""
-# TODO: If TF's CosineDecay much different from Pytorch's equivalent?
-import tensorflow as tf
-from tensorflow.keras.optimizers.schedules import CosineDecayRestarts
+# TODO: Is TF's CosineDecay much different from Pytorch's equivalent?
+from tensorflow.keras.optimizers.schedules import CosineDecay
 from tensorflow.keras.regularizers import L2

 from modeling import PVCNN, AttentionSchedule
@@ -23,11 +22,7 @@
 configs.dataset.num_points = 4096

 # train: scheduler
-# configs.train.optimizer.learning_rate = 1e-3
 configs.train.optimizer.learning_rate = Config(AttentionSchedule)
-configs.train.optimizer.learning_rate.d_model = 100  # Orig: 128
-configs.train.optimizer.learning_rate.warmup_steps = 1000  # Orig: 4000
+configs.train.optimizer.learning_rate.d_model = 100
+configs.train.optimizer.learning_rate.warmup_steps = 1000
 configs.train.optimizer.learning_rate.eps = 1e-8
-# configs.train.optimizer.learning_rate = Config(CosineDecayRestarts)
-# configs.train.optimizer.learning_rate.initial_learning_rate = 1e-3
-# configs.train.optimizer.learning_rate.first_decay_steps = 3200  # Empirical
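Note on the TODO above: a minimal sketch of the comparison, using only the two libraries' public APIs. The 1e-3 rate and 3200-step horizon are just the commented-out values deleted in this hunk, not tuned settings.

    from tensorflow.keras.optimizers.schedules import CosineDecay

    # TF: decays from initial_learning_rate toward alpha * initial_learning_rate
    # over decay_steps, following 0.5 * (1 + cos(pi * step / decay_steps)).
    lr = CosineDecay(initial_learning_rate=1e-3, decay_steps=3200, alpha=0.0)

    # PyTorch's torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=3200,
    # eta_min=0.0) traces the same half-cosine curve, so the two mainly differ
    # in bookkeeping: TF evaluates lr(step) per optimizer step, PyTorch updates
    # on each scheduler.step() call.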

dataloaders/s3dis.py

Lines changed: 6 additions & 8 deletions
@@ -55,7 +55,7 @@ def create_s3dis_dataset(
             lambda label, data_num_points, data,
             use_normalized_coords=use_normalized_coords,
             desired_num_points=num_points,
-            num_classes=num_classes: _random_sample_data(
+            num_classes=num_classes: _pre_process(
                 data,
                 label,
                 data_num_points,
@@ -128,7 +128,7 @@ def _get_file_info(
 
 
 @tf.function
-def _random_sample_data(
+def _pre_process(
     data: tf.Tensor,
     label: tf.Tensor,
     data_num_points: tf.Tensor,
@@ -170,15 +170,13 @@ def sample_without_replacement():
     label = tf.gather(label, indices=indices)

     # Filter out any points with class labels > num_classes or < 0
+    # fmt: off
     indices = tf.squeeze(
-        tf.where(
-            tf.math.logical_and(
+        tf.where(tf.math.logical_and(
             tf.less_equal(label, num_classes - 1),
             tf.greater_equal(label, 0),
-            )
-        ),
-        1,
-    )
+    )), 1)
+    # fmt: on
     data = tf.gather(data, indices=indices)
     label = tf.cast(tf.gather(label, indices=indices), dtype=tf.int32)
 
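For clarity, the label filter above can be sanity-checked in isolation; a toy sketch (values are illustrative, not from the dataset):

    import tensorflow as tf

    num_classes = 3
    label = tf.constant([0, 2, 5, -1, 1])
    data = tf.random.uniform((5, 9))

    # Keep only points whose label falls in [0, num_classes - 1].
    indices = tf.squeeze(tf.where(tf.math.logical_and(
        tf.less_equal(label, num_classes - 1),
        tf.greater_equal(label, 0),
    )), 1)

    data = tf.gather(data, indices=indices)    # -> shape (3, 9)
    label = tf.gather(label, indices=indices)  # -> [0, 2, 1]
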
metrics/s3dis.py

Lines changed: 2 additions & 2 deletions
@@ -1,8 +1,9 @@
-"""Evaluation metrics for PVCNN S3DIS dataset."""
+"""Custom evaluation metrics for PVCNN S3DIS dataset."""
 import tensorflow as tf
 import numpy as np
 from keras import backend

+
 class IouAccuracy(tf.keras.metrics.Metric):
     """Mean IoU accuracy metric."""

@@ -58,5 +59,4 @@ def result(self) -> None:
             self._total_correct,
             (self._total_seen + self._total_positive - self._total_correct),
         )
-        # tf.print("IOU accuracy for each class:", iou, summarize=-1)
         return tf.reduce_sum(iou) / self._num_classes
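The per-class IoU being summed in result is intersection / union computed from the three running counters. A toy sketch with made-up counts for three classes, assuming the surrounding call whose arguments are visible above is a NaN-safe division such as tf.math.divide_no_nan:

    import tensorflow as tf

    total_seen = tf.constant([10.0, 8.0, 0.0])      # ground-truth points per class
    total_positive = tf.constant([9.0, 10.0, 0.0])  # predicted points per class
    total_correct = tf.constant([7.0, 6.0, 0.0])    # correct predictions per class

    iou = tf.math.divide_no_nan(
        total_correct,
        total_seen + total_positive - total_correct,
    )
    mean_iou = tf.reduce_sum(iou) / 3  # (7/12 + 6/12 + 0) / 3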

modeling/layers/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 """PVCNN Custom Layer Implementations."""
 from modeling.layers.mlp import ConvBn, DenseBn
 from modeling.layers.pvconv import PVConv
-from modeling.layers.sub_models import (
+from modeling.layers.components import (
     PointFeaturesBranch,
     CloudFeaturesBranch,
     ClassificationHead,

modeling/layers/sub_models.py renamed to modeling/layers/components.py

Lines changed: 0 additions & 9 deletions
@@ -147,21 +147,13 @@ def build(self, input_shape) -> None:
         self._num_points = int(input_shape[-1])
         super().build(input_shape)

-    # TODO: Delete debugging prints later
     def call(self, inputs, training: bool) -> tf.Tensor:
-        # print("\nCloudFeaturesBranch inputs =", inputs)
         # Get maximum channel value for each channel over all of the points
         x = tf.math.reduce_max(inputs, axis=-1)
-        # print("\nCloudFeaturesBranch reduced inputs shape =", x.shape)
         for layer in self._layers:
             x = layer(x, training=training)
-            # tf.print("CloudFeaturesBranch layer x out nans =", tf.size(tf.where(tf.math.is_nan(x))))
-            # print("\nCloudFeaturesBranch intermed layer shape =", x.shape)
         x = replace_nans_with_norm(x)
         # Duplicate output tensor for N size num_points dimension
-        # print("\nNon-repeated out tensor = ", x)
-        # print("\nNon-repeated out tensor nan idxs = ", tf.where(tf.math.is_nan(x)))
-        # return tf.stack([x] * 4096, axis=-1)
         return tf.repeat(tf.expand_dims(x, axis=-1), self._num_points, axis=-1)


@@ -192,6 +184,5 @@ def call(self, inputs, training: bool) -> tf.Tensor:
         x = inputs
         for layer in self._layers:
             x = layer(x, training=training)
-            # tf.print("ClassificationHead layer x out nans =", tf.size(tf.where(tf.math.is_nan(x))))
         x = replace_nans_with_norm(x)
         return self._softmax(x)
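The max-pool-then-broadcast pattern kept in CloudFeaturesBranch.call above can be run end to end in a standalone sketch (toy shapes, [B, C, N] = (2, 4, 6); 4096 is the hard-coded point count from the deleted tf.stack line):

    import tensorflow as tf

    inputs = tf.random.uniform((2, 4, 6))  # [B, C, N]

    # Global max over the point axis: one feature vector per cloud.
    x = tf.math.reduce_max(inputs, axis=-1)  # -> [2, 4]

    # Broadcast the cloud-level feature back to every point, as the tf.repeat
    # replacement for the old tf.stack([x] * 4096, axis=-1) does.
    out = tf.repeat(tf.expand_dims(x, axis=-1), inputs.shape[-1], axis=-1)  # [2, 4, 6]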

modeling/layers/nan_replace.py

Lines changed: 10 additions & 2 deletions
@@ -1,6 +1,14 @@
+"""A temporary module to fix the NaN issues during training."""
 import tensorflow as tf

+
 def replace_nans_with_norm(has_nans: tf.Tensor) -> tf.Tensor:
-    repl_nans_with_zeros = tf.where(tf.math.is_nan(has_nans), tf.zeros_like(has_nans), has_nans)
+    """Replace NaNs in the input tensor with the norm of the tensor once NaNs
+    have been replaced with zeros."""
+    repl_nans_with_zeros = tf.where(
+        tf.math.is_nan(has_nans), tf.zeros_like(has_nans), has_nans
+    )
     norm = tf.norm(repl_nans_with_zeros)
-    return tf.where(tf.math.is_nan(has_nans), tf.fill(has_nans.shape, norm), has_nans)
+    return tf.where(
+        tf.math.is_nan(has_nans), tf.fill(has_nans.shape, norm), has_nans
+    )
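A quick usage sketch of the function after this change (the import path assumes this repo's layout):

    import numpy as np
    import tensorflow as tf

    from modeling.layers.nan_replace import replace_nans_with_norm

    x = tf.constant([3.0, np.nan, 4.0])
    # NaNs are zeroed first, so the norm is ||[3, 0, 4]|| = 5,
    # and every NaN position is filled with that norm.
    y = replace_nans_with_norm(x)  # -> [3.0, 5.0, 4.0]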

modeling/layers/pvconv.py

Lines changed: 5 additions & 32 deletions
@@ -11,7 +11,7 @@
 
 
 class PVConv(tf.keras.layers.Layer):
-    """The infamous PVConv block."""
+    """The PVConv block."""

     def __init__(
         self,
@@ -56,71 +56,44 @@ def build(self, input_shape) -> None:
                 tf.keras.layers.BatchNormalization(axis=1, epsilon=1e-4)
             )
             self._relu_layers.append(tf.keras.layers.LeakyReLU(alpha=0.1))
-        # Add extra batch norm layer to add before 1st conv3d layer
-        # self._bn_layers.append(
-        #     tf.keras.layers.BatchNormalization(axis=1, epsilon=1e-4)
-        # )

         self._point_features = ConvBn(
             out_channels=self._out_channels,
             kernel_regularizer=self._kernel_regularizer,
         )
-
         self._squeeze = tf.keras.layers.Reshape((self._out_channels, -1))
-
         super().build(input_shape)

-    # TODO: Delete debugging prints later
     def call(self, inputs, training: bool) -> Tuple[tf.Tensor, tf.Tensor]:
         # IC = input channels | OC = output channels (self._out_channels)
         # features = [B, IC, N] | coords = [B, 3, N]
         features, coords = inputs
-        # tf.print("\nfeatures nans =", tf.size(tf.where(tf.math.is_nan(features))))
-        # tf.print("coords nans =", tf.size(tf.where(tf.math.is_nan(coords))))
-        # features = tf.debugging.assert_all_finite(features, "features is nan")
-        # coords = tf.debugging.assert_all_finite(coords, "coords is nan")

         voxel_features, voxel_coords = self._voxelization((features, coords))
-        # tf.print("voxel features shape=", voxel_features.shape)
         # |--> voxel_features = [B, IC, R, R, R] | voxel_coords = [B, 3, N]
-        # tf.print("voxelization features nans =", tf.size(tf.where(tf.math.is_nan(voxel_features))))
         voxel_features = replace_nans_with_norm(voxel_features)
-        # voxel_features = self._voxelization_nan_filter(voxel_features)
-        # tf.print("voxelization features CLIPPED nans =", tf.size(tf.where(tf.math.is_nan(voxel_features))))
-        # tf.print("voxelization coords nans =", tf.size(tf.where(tf.math.is_nan(voxel_coords))))
-        # voxel_features = tf.debugging.assert_all_finite(voxel_features, "voxelization feat is nan")
-        # voxel_coords = tf.debugging.assert_all_finite(voxel_coords, "voxelization coords is nan")

-        # voxel_features = self._bn_layers[-1](voxel_features, training=training)
-        for conv, bn, relu in zip(self._conv_layers, self._bn_layers, self._relu_layers):
+        for conv, bn, relu in zip(
+            self._conv_layers, self._bn_layers, self._relu_layers
+        ):
             voxel_features = conv(voxel_features)
-            voxel_features = replace_nans_with_norm(voxel_features)
-            # tf.print("conv3d out features nans =", tf.size(tf.where(tf.math.is_nan(voxel_features))))
-            # voxel_features = tf.debugging.assert_all_finite(voxel_features, "conv3d out feat is nan")
             voxel_features = bn(voxel_features, training=training)
-            # voxel_features = replace_nans_with_norm(voxel_features)
             voxel_features = relu(voxel_features)
-            # voxel_features = replace_nans_with_norm(voxel_features)
-            # voxel_features = replace_nans_with_norm(voxel_features)
+            voxel_features = replace_nans_with_norm(voxel_features)

         # |--> voxel_features = [B, OC, R, R, R]
         voxel_features = self._squeeze(voxel_features)
         # |--> voxel_features = [B, OC, R**3]
         voxel_features, _, _ = trilinear_devoxelize(
             voxel_features, voxel_coords, self._resolution, training
         )
-        # tf.print("devox out features nans =", tf.size(tf.where(tf.math.is_nan(voxel_features))))
-        # voxel_features = tf.debugging.assert_all_finite(voxel_features, "devox out feat is nan")
         # |--> voxel_features = [B, OC, N]
         voxel_features = replace_nans_with_norm(voxel_features)

         point_features = self._point_features(features, training=training)
-        # tf.print("point features nans =", tf.size(tf.where(tf.math.is_nan(point_features))))
         # |--> point_features = [B, OC, N]
         point_features = replace_nans_with_norm(point_features)

         fused_features = voxel_features + point_features
-        # tf.print("fused out features nans =", tf.size(tf.where(tf.math.is_nan(fused_features))))
-        # fused_features = tf.debugging.assert_all_finite(fused_features, "fused feat is nan")
         # |--> fused_features = [B, OC, N]
         return fused_features, coords
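The _squeeze step in the shape annotations above ([B, OC, R, R, R] -> [B, OC, R**3]) is a plain Reshape; a toy sketch with B=2, OC=8, R=4:

    import tensorflow as tf

    B, OC, R = 2, 8, 4
    voxel_features = tf.random.uniform((B, OC, R, R, R))

    # Flatten the R x R x R voxel grid into one axis before devoxelization.
    squeeze = tf.keras.layers.Reshape((OC, -1))
    flat = squeeze(voxel_features)  # -> shape (2, 8, 64) == [B, OC, R**3]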

modeling/layers/voxelization.py

Lines changed: 1 addition & 3 deletions
@@ -8,9 +8,7 @@
 class Voxelization(tf.keras.layers.Layer):
     """Voxelization layer."""

-    def __init__(
-        self, resolution: int, normalize: bool, eps: float, **kwargs
-    ):
+    def __init__(self, resolution: int, normalize: bool, eps: float, **kwargs):
         super().__init__(**kwargs)
         self._resolution = resolution
         self._normalize = normalize

modeling/s3dis/lr_schedule.py

Lines changed: 8 additions & 21 deletions
@@ -1,10 +1,14 @@
-"""TODO if we end up using"""
+"""Define custom learning rate schedulers."""
+
 import tensorflow as tf

+
 class AttentionSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
-    """TODO if we end up using
-    https://www.tensorflow.org/text/tutorials/transformer#optimizer
+    """Learning rate scheduler based off of the scheduler used in
+    Transformers (Vaswani, Ashish, et al.). Adapted from
+    https://www.tensorflow.org/text/tutorials/transformer#optimizer.
     """
+
     def __init__(self, d_model=128, warmup_steps=4000, eps=1e-8):
         super().__init__()
         self.d_model = d_model
@@ -15,22 +19,5 @@ def __init__(self, d_model=128, warmup_steps=4000, eps=1e-8):
     def __call__(self, step):
         arg1 = tf.math.rsqrt(step)
         arg2 = step * (self.warmup_steps ** -1.5)
-        # tf.print("----------------")
-        # tf.print("step = ", step)
-        # tf.print("arg1 = ", arg1)
-        # tf.print("arg2 = ", arg2)
-
-        lr = tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) + self.eps
-        # tf.print("lr = ", lr)
-        # tf.print("----------------")
-        return lr
-
-
-# -------------------------------------------------------------------
-# import matplotlib.pyplot as plt

-# temp_learning_rate_schedule = AttentionSchedule()
-# plt.plot(temp_learning_rate_schedule(tf.range(80000, dtype=tf.float32)))
-# plt.ylabel("Learning Rate")
-# plt.xlabel("Train Step")
-# plt.savefig("custom-schedule.png")
+        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) + self.eps
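For reference, the schedule kept above is lr(step) = rsqrt(d_model) * min(rsqrt(step), step * warmup_steps**-1.5) + eps: linear warm-up for the first warmup_steps, then inverse-square-root decay, peaking near step == warmup_steps. A usage sketch with the values from configs/s3dis/pvcnn (assuming, as in the TF tutorial this adapts, that d_model is cast to float inside the class):

    import tensorflow as tf

    from modeling import AttentionSchedule

    # d_model and warmup_steps match the s3dis/pvcnn config above.
    schedule = AttentionSchedule(d_model=100, warmup_steps=1000, eps=1e-8)
    optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)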
