Skip to content

Commit

Permalink
Re-organize code
Browse files Browse the repository at this point in the history
  • Loading branch information
yanx27 committed Mar 20, 2021
1 parent 0c51a35 commit 88790dd
Show file tree
Hide file tree
Showing 5 changed files with 161 additions and 112 deletions.
6 changes: 5 additions & 1 deletion data_utils/S3DISDataLoader.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import os
import numpy as np

from tqdm import tqdm
from torch.utils.data import Dataset


Expand All @@ -15,11 +17,13 @@ def __init__(self, split='train', data_root='trainval_fullarea', num_point=4096,
rooms_split = [room for room in rooms if not 'Area_{}'.format(test_area) in room]
else:
rooms_split = [room for room in rooms if 'Area_{}'.format(test_area) in room]

self.room_points, self.room_labels = [], []
self.room_coord_min, self.room_coord_max = [], []
num_point_all = []
labelweights = np.zeros(13)
for room_name in rooms_split:

for room_name in tqdm(rooms_split, total=len(rooms_split)):
room_path = os.path.join(data_root, room_name)
room_data = np.load(room_path) # xyzrgbl, N*7
points, labels = room_data[:, 0:6], room_data[:, 6] # xyzrgb, N*6; l, N
Expand Down
50 changes: 30 additions & 20 deletions test_partseg.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,17 @@
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))

# Mapping from ShapeNet category name to its part-label ids (16 categories, 50 parts).
# NOTE(review): the diff residue had duplicated a stale copy of this dict; kept only the final version.
seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
               'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37],
               'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49],
               'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}

# Inverse lookup: part-label id -> category name, e.g. {0: 'Airplane', ..., 49: 'Table'}.
seg_label_to_cat = {label: cat for cat, labels in seg_classes.items() for label in labels}


def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
Expand All @@ -33,14 +38,15 @@ def to_categorical(y, num_classes):
def parse_args(argv=None):
    """Parse command-line arguments for part-segmentation testing.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            Kept optional (backward-compatible) so the parser can be driven
            programmatically, e.g. from tests.

    Returns:
        argparse.Namespace with batch_size, gpu, num_point, log_dir,
        normal and num_votes attributes.
    """
    # NOTE(review): the diff residue re-registered every option a second time
    # (stale pre-commit lines); argparse raises on duplicate option strings,
    # so only the final set of add_argument calls is kept.
    parser = argparse.ArgumentParser('PointNet')
    parser.add_argument('--batch_size', type=int, default=24, help='batch size in testing')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
    parser.add_argument('--num_point', type=int, default=2048, help='point Number')
    parser.add_argument('--log_dir', type=str, required=True, help='experiment root')
    parser.add_argument('--normal', action='store_true', default=False, help='use normals')
    parser.add_argument('--num_votes', type=int, default=3, help='aggregate segmentation scores with voting')
    return parser.parse_args(argv)


def main(args):
def log_string(str):
logger.info(str)
Expand All @@ -64,20 +70,19 @@ def log_string(str):

root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

TEST_DATASET = PartNormalDataset(root = root, npoints=args.num_point, split='test', normal_channel=args.normal)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size,shuffle=False, num_workers=4)
log_string("The number of test data is: %d" % len(TEST_DATASET))
TEST_DATASET = PartNormalDataset(root=root, npoints=args.num_point, split='test', normal_channel=args.normal)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
log_string("The number of test data is: %d" % len(TEST_DATASET))
num_classes = 16
num_part = 50

'''MODEL LOADING'''
model_name = os.listdir(experiment_dir+'/logs')[0].split('.')[0]
model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
MODEL = importlib.import_module(model_name)
classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
classifier.load_state_dict(checkpoint['model_state_dict'])


with torch.no_grad():
test_metrics = {}
total_correct = 0
Expand All @@ -86,29 +91,35 @@ def log_string(str):
total_correct_class = [0 for _ in range(num_part)]
shape_ious = {cat: [] for cat in seg_classes.keys()}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}

for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat

for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
classifier = classifier.eval()
for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader),
smoothing=0.9):
batchsize, num_point, _ = points.size()
cur_batch_size, NUM_POINT, _ = points.size()
points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
points = points.transpose(2, 1)
classifier = classifier.eval()
vote_pool = torch.zeros(target.size()[0], target.size()[1], num_part).cuda()

for _ in range(args.num_votes):
seg_pred, _ = classifier(points, to_categorical(label, num_classes))
vote_pool += seg_pred

seg_pred = vote_pool / args.num_votes
cur_pred_val = seg_pred.cpu().data.numpy()
cur_pred_val_logits = cur_pred_val
cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
target = target.cpu().data.numpy()

for i in range(cur_batch_size):
cat = seg_label_to_cat[target[i, 0]]
logits = cur_pred_val_logits[i, :, :]
cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]

correct = np.sum(cur_pred_val == target)
total_correct += correct
total_seen += (cur_batch_size * NUM_POINT)
Expand Down Expand Up @@ -145,13 +156,12 @@ def log_string(str):
test_metrics['class_avg_iou'] = mean_shape_ious
test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)

log_string('Accuracy is: %.5f' % test_metrics['accuracy'])
log_string('Class avg accuracy is: %.5f' % test_metrics['class_avg_accuracy'])
log_string('Class avg mIOU is: %.5f' % test_metrics['class_avg_iou'])
log_string('Inctance avg mIOU is: %.5f' % test_metrics['inctance_avg_iou'])

log_string('Accuracy is: %.5f'%test_metrics['accuracy'])
log_string('Class avg accuracy is: %.5f'%test_metrics['class_avg_accuracy'])
log_string('Class avg mIOU is: %.5f'%test_metrics['class_avg_iou'])
log_string('Inctance avg mIOU is: %.5f'%test_metrics['inctance_avg_iou'])

if __name__ == '__main__':
args = parse_args()
main(args)

35 changes: 21 additions & 14 deletions test_semseg.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,15 @@
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))

# The 13 S3DIS semantic classes, in label-id order.
# NOTE(review): the diff residue had a stale duplicate of `classes`/`class2label`; kept only the final version.
classes = ['ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'table', 'chair', 'sofa', 'bookcase',
           'board', 'clutter']
class2label = {cls: i for i, cls in enumerate(classes)}
seg_classes = class2label
# Inverse lookup: label id -> class name.
seg_label_to_cat = {i: cat for i, cat in enumerate(seg_classes.keys())}


def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('Model')
Expand All @@ -38,15 +40,17 @@ def parse_args():
parser.add_argument('--num_votes', type=int, default=5, help='Aggregate segmentation scores with voting [default: 5]')
return parser.parse_args()


def add_vote(vote_label_pool, point_idx, pred_label, weight):
    """Accumulate per-point prediction votes into a whole-scene vote pool.

    Args:
        vote_label_pool: (num_scene_points, num_classes) array of vote
            counts, updated in place.
        point_idx: (B, N) index of each block point within the whole scene.
        pred_label: (B, N) predicted class label per point.
        weight: (B, N) mask; a vote is cast only where the entry is truthy.

    Returns:
        The same vote_label_pool array, with one vote added for every
        weighted point.
    """
    # NOTE(review): the diff residue kept both the stale `if weight[b,n]:`
    # and the reformatted `if weight[b, n]:` line, leaving an `if` with no
    # body (a syntax error); the stale line is removed here.
    B = pred_label.shape[0]
    N = pred_label.shape[1]
    for b in range(B):
        for n in range(N):
            if weight[b, n]:
                vote_label_pool[int(point_idx[b, n]), int(pred_label[b, n])] += 1
    return vote_label_pool


def main(args):
def log_string(str):
logger.info(str)
Expand Down Expand Up @@ -75,17 +79,18 @@ def log_string(str):
BATCH_SIZE = args.batch_size
NUM_POINT = args.num_point

root = 'data/stanford_indoor3d/'
root = 'data/s3dis/stanford_indoor3d/'

TEST_DATASET_WHOLE_SCENE = ScannetDatasetWholeScene(root, split='test', test_area=args.test_area, block_points=NUM_POINT)
log_string("The number of test data is: %d" % len(TEST_DATASET_WHOLE_SCENE))
log_string("The number of test data is: %d" % len(TEST_DATASET_WHOLE_SCENE))

'''MODEL LOADING'''
model_name = os.listdir(experiment_dir+'/logs')[0].split('.')[0]
model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
MODEL = importlib.import_module(model_name)
classifier = MODEL.get_model(NUM_CLASSES).cuda()
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
classifier.load_state_dict(checkpoint['model_state_dict'])
classifier = classifier.eval()

with torch.no_grad():
scene_id = TEST_DATASET_WHOLE_SCENE.file_list
Expand All @@ -99,7 +104,7 @@ def log_string(str):
log_string('---- EVALUATION WHOLE SCENE----')

for batch_idx in range(num_batches):
print("visualize [%d/%d] %s ..." % (batch_idx+1, num_batches, scene_id[batch_idx]))
print("visualize [%d/%d] %s ..." % (batch_idx + 1, num_batches, scene_id[batch_idx]))
total_seen_class_tmp = [0 for _ in range(NUM_CLASSES)]
total_correct_class_tmp = [0 for _ in range(NUM_CLASSES)]
total_iou_deno_class_tmp = [0 for _ in range(NUM_CLASSES)]
Expand All @@ -119,6 +124,7 @@ def log_string(str):
batch_label = np.zeros((BATCH_SIZE, NUM_POINT))
batch_point_index = np.zeros((BATCH_SIZE, NUM_POINT))
batch_smpw = np.zeros((BATCH_SIZE, NUM_POINT))

for sbatch in range(s_batch_num):
start_idx = sbatch * BATCH_SIZE
end_idx = min((sbatch + 1) * BATCH_SIZE, num_blocks)
Expand All @@ -130,7 +136,7 @@ def log_string(str):
batch_data[:, :, 3:6] /= 1.0

torch_data = torch.Tensor(batch_data)
torch_data= torch_data.float().cuda()
torch_data = torch_data.float().cuda()
torch_data = torch_data.transpose(2, 1)
seg_pred, _ = classifier(torch_data)
batch_pred_label = seg_pred.contiguous().cpu().data.max(2)[1].numpy()
Expand Down Expand Up @@ -166,12 +172,12 @@ def log_string(str):
color_gt = g_label2color[whole_scene_label[i]]
if args.visual:
fout.write('v %f %f %f %d %d %d\n' % (
whole_scene_data[i, 0], whole_scene_data[i, 1], whole_scene_data[i, 2], color[0], color[1],
color[2]))
whole_scene_data[i, 0], whole_scene_data[i, 1], whole_scene_data[i, 2], color[0], color[1],
color[2]))
fout_gt.write(
'v %f %f %f %d %d %d\n' % (
whole_scene_data[i, 0], whole_scene_data[i, 1], whole_scene_data[i, 2], color_gt[0],
color_gt[1], color_gt[2]))
whole_scene_data[i, 0], whole_scene_data[i, 1], whole_scene_data[i, 2], color_gt[0],
color_gt[1], color_gt[2]))
if args.visual:
fout.close()
fout_gt.close()
Expand All @@ -187,10 +193,11 @@ def log_string(str):
log_string('eval whole scene point avg class acc: %f' % (
np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float) + 1e-6))))
log_string('eval whole scene point accuracy: %f' % (
np.sum(total_correct_class) / float(np.sum(total_seen_class) + 1e-6)))
np.sum(total_correct_class) / float(np.sum(total_seen_class) + 1e-6)))

print("Done!")


if __name__ == '__main__':
args = parse_args()
main(args)
Loading

0 comments on commit 88790dd

Please sign in to comment.