Commit 5c28541: fix errors

yanx27 committed May 20, 2020 (1 parent: ecd6acc)
Showing 4 changed files with 7 additions and 7 deletions.
README.md (2 changes: 1 addition & 1 deletion)
@@ -27,7 +27,7 @@ git clone https://github.com/yanx27/PointASNL.git
Installation instructions for Ubuntu 16.04 (tested with **CUDA10**):

* Make sure <a href="https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html">CUDA</a> and <a href="https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html">cuDNN</a> are installed. Only this configuration has been tested:
- - Python 3.6.9, TensorFlow 1.13.0, CUDA 10.1
+ - Python 3.6.9, TensorFlow 1.13.1, CUDA 10.1


* Follow the <a href="https://www.tensorflow.org/install/pip">TensorFlow installation procedure</a>.
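For the pinned version above, the matching wheel is typically installed with pip, e.g. `pip install tensorflow-gpu==1.13.1` (an illustrative command, assuming a Python 3.6 environment; match the wheel to your CUDA/cuDNN setup).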
models/pointasnl_sem_seg.py (4 changes: 2 additions & 2 deletions)
@@ -50,7 +50,7 @@ def get_model(point_cloud, is_training, num_class, bn_decay=None, weight_decay=None
return net, end_points


- def get_loss(pred, label, end_points, smpw=1.0, uniform_weight=0.01, weights_decay=1e-4):
+ def get_loss(pred, label, end_points, smpw=1.0, uniform_weight=0.01, weights_decay=1e-4, radius=0.07):
"""
pred: BxNxC,
label: BxN,
@@ -59,7 +59,7 @@ def get_loss(pred, label, end_points, smpw=1.0, uniform_weight=0.01, weights_decay=1e-4, radius=0.07):
regularization_losses = [tf.nn.l2_loss(v) for v in tf.global_variables() if 'weights' in v.name]
regularization_loss = weights_decay * tf.add_n(regularization_losses)
classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
- uniform_loss = get_repulsion_loss(end_points['l1_xyz'], nsample=20, radius=0.07)
+ uniform_loss = get_repulsion_loss(end_points['l1_xyz'], nsample=20, radius=radius)
weight_reg = tf.add_n(tf.get_collection('losses'))
classify_loss_mean = tf.reduce_mean(classify_loss, name='classify_loss_mean')
total_loss = classify_loss_mean + weight_reg + uniform_weight * uniform_loss + regularization_loss
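This makes the repulsion radius a tunable argument instead of a hard-coded 0.07 (the same change is applied in models/pointasnl_sem_seg_res.py below). A minimal usage sketch under TensorFlow 1.x, assuming pred, label, and end_points come from get_model above; the radius value 0.1 is purely illustrative:

    # Hypothetical training-script snippet: choose a dataset-specific radius
    # for the repulsion term rather than relying on the old hard-coded 0.07.
    loss = get_loss(pred, label, end_points,
                    smpw=1.0,             # per-point loss weights (scalar or BxN tensor)
                    uniform_weight=0.01,  # weight of the repulsion term in the total loss
                    radius=0.1)           # illustrative value; the default remains 0.07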
models/pointasnl_sem_seg_res.py (4 changes: 2 additions & 2 deletions)
@@ -68,7 +68,7 @@ def get_model(point_cloud, is_training, num_class, bn_decay=None, weight_decay=None
return net, end_points


- def get_loss(pred, label, end_points, smpw=1.0, uniform_weight=0.01, weights_decay=1e-4):
+ def get_loss(pred, label, end_points, smpw=1.0, uniform_weight=0.01, weights_decay=1e-4, radius=0.07):
"""
pred: BxNxC,
label: BxN,
@@ -77,7 +77,7 @@ def get_loss(pred, label, end_points, smpw=1.0, uniform_weight=0.01, weights_decay=1e-4, radius=0.07):
regularization_losses = [tf.nn.l2_loss(v) for v in tf.global_variables() if 'weights' in v.name]
regularization_loss = weights_decay * tf.add_n(regularization_losses)
classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
- uniform_loss = get_repulsion_loss(end_points['l1_xyz'], nsample=20, radius=0.07)
+ uniform_loss = get_repulsion_loss(end_points['l1_xyz'], nsample=20, radius=radius)
weight_reg = tf.add_n(tf.get_collection('losses'))
classify_loss_mean = tf.reduce_mean(classify_loss, name='classify_loss_mean')
total_loss = classify_loss_mean + weight_reg + uniform_weight * uniform_loss + regularization_loss
utils/pointasnl_util.py (4 changes: 2 additions & 2 deletions)
@@ -147,12 +147,12 @@ def SampleWeights(new_point, grouped_xyz, mlps, is_training, bn_decay, weight_decay, …
new_group_features = tf.reshape(new_group_features, (batch_size, npoint, nsample, channel))
for i, c in enumerate(mlps):
activation = tf.nn.relu if i < len(mlps) - 1 else None
- new_group_weights = tf_util.conv2d(new_group_features, c, [1, 1],
+ new_group_features = tf_util.conv2d(new_group_features, c, [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mlp2_%d' % (i), bn_decay=bn_decay, weight_decay=weight_decay,
activation_fn=activation)
- new_group_weights = tf.nn.softmax(new_group_weights, axis=2)  # (batch_size, npoint, nsample, mlp[-1])
+ new_group_weights = tf.nn.softmax(new_group_features, axis=2)  # (batch_size, npoint, nsample, mlp[-1])
return new_group_weights

def AdaptiveSampling(group_xyz, group_feature, num_neighbor, is_training, bn_decay, weight_decay, scope, bn):
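The substance of this fix: the old loop wrote each conv2d output into new_group_weights while re-reading the raw new_group_features, so the MLP layers never chained. Every layer ran on the raw features, all but the last were discarded, and the softmax saw only that single unchained layer's output. Below is a self-contained sketch of the corrected data flow; it substitutes plain tf.layers.conv2d for the repo's tf_util.conv2d (which additionally handles batch norm and weight decay), and the function name is hypothetical:

    import tensorflow as tf

    def sample_weights_sketch(new_group_features, mlps):
        # new_group_features: (batch_size, npoint, nsample, channel)
        x = new_group_features
        for i, c in enumerate(mlps):
            activation = tf.nn.relu if i < len(mlps) - 1 else None
            # Each layer consumes the previous layer's output; before the fix,
            # every layer re-read the raw features and all but the last were lost.
            x = tf.layers.conv2d(x, c, [1, 1], padding='valid',
                                 activation=activation, name='mlp2_%d' % i)
        # Normalize over the nsample (neighbor) axis, applied to the output of
        # the full MLP stack rather than a single unchained layer.
        return tf.nn.softmax(x, axis=2)  # (batch_size, npoint, nsample, mlps[-1])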
