PointCNN

Making Predictions

Open niallomahony93 opened this issue 5 years ago • 3 comments

Hi,

I am trying to extract the prediction for each point cloud as it is fed in for inference, one by one.

Forgive me for my ignorance, but I cannot see how to do this from the validation block of the code. Can you give me some pointers on how to get there from where I am so far:

    saver = tf.train.import_meta_graph(args.load_ckpt+'.meta')
    saver.restore(sess, args.load_ckpt)
    print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt))
    data_val_placeholder = tf.placeholder(data_val.dtype, data_val.shape, name='data_val')
    label_val_placeholder = tf.placeholder(tf.int64, label_val.shape, name='label_val')
    
    for cloud_path in filenames:
      if os.path.isfile(cloud_path):
        
        #application specific on how to get ground truth for each pointcloud here
        
        
          pc = pypcd.PointCloud.from_path(cloud_path)

          if pc.width == num_points:
            label = label_dict[str(label)]

            # preallocate one row per point: x, y, z, normal_x, normal_y, normal_z
            pc_arr = np.zeros((num_points, 6), dtype=np.float32)
            for j in range(0, num_points):
              pc_arr[j] = [pc.pc_data['x'][j], pc.pc_data['y'][j], pc.pc_data['z'][j],
                           pc.pc_data['normal_x'][j], pc.pc_data['normal_y'][j], pc.pc_data['normal_z'][j]]

            data_val = np.expand_dims(pc_arr, axis=0)
            label_val = np.expand_dims(label, axis=0)
            sess.run(iterator_val.initializer, feed_dict={
                data_val_placeholder: data_val,
                label_val_placeholder: label_val,
            })


I was wondering if there was some line you could use, similar to `prediction = sess.run(get_prediction, ...)`.
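
For reference, this is roughly the kind of snippet I am hoping for. It is only a minimal sketch, assuming the graph is rebuilt as in train_val_cls.py so that `net.logits` and the `handle`/`indices`/`xforms`/`rotations`/`jitter_range`/`is_training` placeholders exist, and that `handle_val`, `batch_size_val`, `xforms_np` and `rotations_np` are set up as in the validation loop:

    # Sketch: turn the network logits into class predictions and fetch them
    # for one batch pulled from the already-initialised validation iterator.
    probs = tf.nn.softmax(net.logits, name='probs')
    predictions = tf.argmax(probs, axis=-1, name='predictions')

    pred = sess.run(predictions, feed_dict={
        handle: handle_val,
        indices: pf.get_indices(batch_size_val, sample_num, point_num),
        xforms: xforms_np,
        rotations: rotations_np,
        jitter_range: np.array([jitter_val]),
        is_training: False,
    })
    print(pred)  # predicted class indices for this batch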

Any help would be appreciated.

niallomahony93 avatar Dec 24 '19 09:12 niallomahony93

Hi,

I have gotten a bit further and am able to get the predictions by iterating one step at a time through the HDF5 file and logging the results to a file. The issue now is that the predictions don't match the ground truth very well, even when run on the training data.

My code is below. Am I missing some model initialisation step?

#!/usr/bin/python3
"""Validation On Classification Task."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import math
import random
import shutil
import argparse
import importlib
import data_utils
import numpy as np
import pointfly as pf
import tensorflow as tf
from datetime import datetime


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', '-t', help='Path to data', required=True)
    parser.add_argument('--path_val', '-v', help='Path to validation data')
    parser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load')
    parser.add_argument('--save_folder', '-s', help='Path to folder for saving check points and summary', required=True)
    parser.add_argument('--model', '-m', help='Model to use', required=True)
    parser.add_argument('--setting', '-x', help='Setting to use', required=True)
    parser.add_argument('--log', help='Log to FILE in save folder; use - for stdout (default is log.txt)', metavar='FILE', default='log.txt')
    parser.add_argument('--no_timestamp_folder', help='Dont save to timestamp folder', action='store_true')
    parser.add_argument('--no_code_backup', help='Dont backup code', action='store_true')
    args = parser.parse_args()

    if not args.no_timestamp_folder:
        time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        root_folder = os.path.join(args.save_folder, '%s_%s_%s_%d' % (args.model, args.setting, time_string, os.getpid()))
    else:
        root_folder = args.save_folder
    if not os.path.exists(root_folder):
        os.makedirs(root_folder)

    if args.log != '-':
        sys.stdout = open(os.path.join(root_folder, args.log), 'w')

    print('PID:', os.getpid())

    print(args)

#    model_path = os.path.join(os.path.dirname(__file__), args.model)
#    sys.path.append(model_path)
    model = importlib.import_module(args.model)
    setting_path = os.path.join(os.path.dirname(__file__), args.model)
    sys.path.append(setting_path)
    print(setting_path)
    setting = importlib.import_module(args.setting)

    batch_size = setting.batch_size
    sample_num = setting.sample_num
    step_val = setting.step_val
    rotation_range = setting.rotation_range
    rotation_range_val = setting.rotation_range_val
    scaling_range = setting.scaling_range
    scaling_range_val = setting.scaling_range_val
    jitter = setting.jitter
    jitter_val = setting.jitter_val
    pool_setting_val = None if not hasattr(setting, 'pool_setting_val') else setting.pool_setting_val
    
    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data_val, label_val = setting.load_fn(args.path_val)
    
      
    if setting.save_ply_fn is not None:
        folder = os.path.join(root_folder, 'pts')
        print('{}-Saving samples as .ply files to {}...'.format(datetime.now(), folder))
        sample_num_for_ply = min(512, data_val.shape[0])
        if setting.map_fn is None:
            data_sample = data_val[:sample_num_for_ply]
        else:
            data_sample_list = []
            for idx in range(sample_num_for_ply):
                data_sample_list.append(setting.map_fn(data_val[idx], 0)[0])
            data_sample = np.stack(data_sample_list)
        setting.save_ply_fn(data_sample, folder)

    num_val = data_val.shape[0]
    point_num = data_val.shape[1]
    print('{}-{:d} validation samples.'.format(datetime.now(), num_val))

    ######################################################################
    # Placeholders
    indices = tf.placeholder(tf.int32, shape=(None, None, 2), name="indices")
    xforms = tf.placeholder(tf.float32, shape=(None, 3, 3), name="xforms")
    rotations = tf.placeholder(tf.float32, shape=(None, 3, 3), name="rotations")
    jitter_range = tf.placeholder(tf.float32, shape=(1), name="jitter_range")
    global_step = tf.Variable(0, trainable=False, name='global_step')
    is_training = tf.placeholder(tf.bool, name='is_training')

    
    data_val_placeholder = tf.placeholder(data_val.dtype, data_val.shape, name='data_val')
    label_val_placeholder = tf.placeholder(tf.int64, label_val.shape, name='label_val')
    handle = tf.placeholder(tf.string, shape=[], name='handle')

   ######################################################################
    
    dataset_val = tf.data.Dataset.from_tensor_slices((data_val_placeholder, label_val_placeholder))
    if setting.map_fn is not None:
        dataset_val = dataset_val.map(lambda data, label: tuple(tf.py_func(
            setting.map_fn, [data, label], [tf.float32, label.dtype])), num_parallel_calls=setting.num_parallel_calls)
    if setting.keep_remainder:
        dataset_val = dataset_val.batch(batch_size)
        batch_num_val = math.ceil(num_val / batch_size)
    else:
        dataset_val = dataset_val.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
        batch_num_val = math.floor(num_val / batch_size)
    iterator_val = dataset_val.make_initializable_iterator()
    print('{}-{:d} testing batches per test.'.format(datetime.now(), batch_num_val))

    iterator = tf.data.Iterator.from_string_handle(handle, dataset_val.output_types)
    (pts_fts, labels) = iterator.get_next()

    pts_fts_sampled = tf.gather_nd(pts_fts, indices=indices, name='pts_fts_sampled')
    features_augmented = None
    if setting.data_dim > 3:
        points_sampled, features_sampled = tf.split(pts_fts_sampled,
                                                    [3, setting.data_dim - 3],
                                                    axis=-1,
                                                    name='split_points_features')
        if setting.use_extra_features:
            if setting.with_normal_feature:
                if setting.data_dim < 6:
                    print('Only 3D normals are supported!')
                    exit()
                elif setting.data_dim == 6:
                    features_augmented = pf.augment(features_sampled, rotations)
                else:
                    normals, rest = tf.split(features_sampled, [3, setting.data_dim - 6])
                    normals_augmented = pf.augment(normals, rotations)
                    features_augmented = tf.concat([normals_augmented, rest], axis=-1)
            else:
                features_augmented = features_sampled
    else:
        points_sampled = pts_fts_sampled
    points_augmented = pf.augment(points_sampled, xforms, jitter_range)

    net = model.Net(points=points_augmented, features=features_augmented, is_training=is_training, setting=setting)
    logits = net.logits
    probs = tf.nn.softmax(logits, name='probs')
    predictions = tf.argmax(probs, axis=-1, name='predictions')

    labels_2d = tf.expand_dims(labels, axis=-1, name='labels_2d')
    labels_tile = tf.tile(labels_2d, (1, tf.shape(logits)[1]), name='labels_tile')
    loss_op = tf.losses.sparse_softmax_cross_entropy(labels=labels_tile, logits=logits)

    with tf.name_scope('metrics'):
        loss_mean_op, loss_mean_update_op = tf.metrics.mean(loss_op)
        t_1_acc_op, t_1_acc_update_op = tf.metrics.accuracy(labels_tile, predictions)
        t_1_per_class_acc_op, t_1_per_class_acc_update_op = tf.metrics.mean_per_class_accuracy(labels_tile,
                                                                                               predictions,
                                                                                               setting.num_class)
    reset_metrics_op = tf.variables_initializer([var for var in tf.local_variables()
                                                 if var.name.split('/')[0] == 'metrics'])

    
    _ = tf.summary.scalar('loss/val', tensor=loss_mean_op, collections=['val'])
    _ = tf.summary.scalar('t_1_acc/val', tensor=t_1_acc_op, collections=['val'])
    _ = tf.summary.scalar('t_1_per_class_acc/val', tensor=t_1_per_class_acc_op, collections=['val'])

#    lr_exp_op = tf.train.exponential_decay(setting.learning_rate_base, global_step, setting.decay_steps,
#                                           setting.decay_rate, staircase=True)
#    lr_clip_op = tf.maximum(lr_exp_op, setting.learning_rate_min)
#    _ = tf.summary.scalar('learning_rate', tensor=lr_clip_op, collections=['train'])
#    reg_loss = setting.weight_decay * tf.losses.get_regularization_loss()
#    if setting.optimizer == 'adam':
#        optimizer = tf.train.AdamOptimizer(learning_rate=lr_clip_op, epsilon=setting.epsilon)
#    elif setting.optimizer == 'momentum':
#        optimizer = tf.train.MomentumOptimizer(learning_rate=lr_clip_op, momentum=setting.momentum, use_nesterov=True)
#    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#    with tf.control_dependencies(update_ops):
#        train_op = optimizer.minimize(loss_op + reg_loss, global_step=global_step)

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

#    saver = tf.train.Saver(max_to_keep=None)

    # backup all code
    if not args.no_code_backup:
        code_folder = os.path.abspath(os.path.dirname(__file__))
        shutil.copytree(code_folder, os.path.join(root_folder, os.path.basename(code_folder)))

#    folder_ckpt = os.path.join(root_folder, 'ckpts')
#    if not os.path.exists(folder_ckpt):
#        os.makedirs(folder_ckpt)

    folder_summary = os.path.join(root_folder, 'summary')
    if not os.path.exists(folder_summary):
        os.makedirs(folder_summary)

    parameter_num = np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
    print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))

    with tf.Session() as sess:
        #summaries_op = tf.summary.merge_all('train')
        summaries_val_op = tf.summary.merge_all('val')
        summary_writer = tf.summary.FileWriter(folder_summary, sess.graph)

        sess.run(init_op)
    ######################################################################
    
        # Load the model
        if args.load_ckpt is not None:
            #saver.restore(sess, args.load_ckpt)
            saver = tf.train.import_meta_graph(args.load_ckpt+'.meta')
            saver.restore(sess, args.load_ckpt)
            print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt))
        
        #handle_train = sess.run(iterator_train.string_handle())
        handle_val = sess.run(iterator_val.string_handle())

    #    sess.run(iterator_train.initializer, feed_dict={
    #        data_train_placeholder: data_train,
    #        label_train_placeholder: label_train,
    #    })
        sess.run(iterator_val.initializer, feed_dict={
                    data_val_placeholder: data_val,
                    label_val_placeholder: label_val,
                })
        

        for batch_idx_val in range(batch_num_val):
            ######################################################################
            # Validation
                print("Batch Index: "+str(batch_idx_val))
#                sess.run(iterator.get_next(), feed_dict={
#                    data_val_placeholder: data_val,
#                    label_val_placeholder: label_val,
#                })
                
                #sess.run(reset_metrics_op)
            
                if not setting.keep_remainder \
                        or num_val % batch_size == 0 \
                        or batch_idx_val != batch_num_val - 1:
                    batch_size_val = batch_size
                else:
                    batch_size_val = num_val % batch_size
                xforms_np, rotations_np = pf.get_xforms(batch_size_val,
                                                        rotation_range=rotation_range_val,
                                                        scaling_range=scaling_range_val,
                                                        order=setting.rotation_order)
#                sess.run([loss_mean_update_op, t_1_acc_update_op, t_1_per_class_acc_update_op],
#                             feed_dict={
#                                 handle: handle_val,
#                                 indices: pf.get_indices(batch_size_val, sample_num, point_num,
#                                                         ),
#                                 xforms: xforms_np,
#                                 rotations: rotations_np,
#                                 jitter_range: np.array([jitter_val]),
#                                 is_training: False,
#                             })
#                loss_val, t_1_acc_val, t_1_per_class_acc_val, summaries_val, step = sess.run(
#                    [loss_mean_op, t_1_acc_op, t_1_per_class_acc_op, summaries_val_op, global_step])
#                summary_writer.add_summary(summaries_val, step)
#                print('{}-[Val  ]-Average:      Loss: {:.4f}  T-1 Acc: {:.4f}  T-1 mAcc: {:.4f}'
#                      .format(datetime.now(), loss_val, t_1_acc_val, t_1_per_class_acc_val))
#                
                print('Prediction:        Labels: ')
                pred, label=sess.run([predictions, labels_tile], feed_dict={
                             handle: handle_val,
                             indices: pf.get_indices(batch_size_val, sample_num, point_num,
                                                     ),
                             xforms: xforms_np,
                             rotations: rotations_np,
                             jitter_range: np.array([jitter_val]),
                             is_training: False,
                         })
                         
                print(pred)
                print(label)
                with open(os.path.join(root_folder,'UCD_prediction_log_pointcnn.txt'), 'a') as log_file:
                          log_file.write(str(pred)+','+str(label)+'\n')
                        
                sys.stdout.flush()
            ######################################################################

            ######################################################################
            ######################################################################
        print('{}-Done!'.format(datetime.now()))

if __name__ == '__main__':
    main()

and here is my setting file:

#!/usr/bin/python3

import os
import sys
import math

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import data_utils

load_fn = data_utils.load_cls
balance_fn = data_utils.balance_classes
map_fn = None
keep_remainder = True
save_ply_fn = None
#def save_ply_fn(data_sample, folder):
#    data_utils.save_ply_point_with_normal(data_sample, folder)

num_class = 11

sample_num = 2048

batch_size = 1

num_epochs = 1

step_val = 1

learning_rate_base = 0.01
decay_steps = 8000
decay_rate = 0.5
learning_rate_min = 1e-6

weight_decay = 1e-5

jitter = 0.001
jitter_val = 0.0

rotation_range = [0, math.pi/36, 0, 'g']
rotation_range_val = [0, 0, 0, 'u']
rotation_order = 'rxyz'

scaling_range = [0.1, 0.1, 0.1, 'g']
scaling_range_val = [0, 0, 0, 'u']

sample_num_variance = 1 // 8
sample_num_clip = 1 // 4

x = 3

xconv_param_name = ('K', 'D', 'P', 'C', 'links')
xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
                [(8, 1, -1, 16 * x, []),
                 (12, 2, 384, 32 * x, []),
                 (16, 2, 128, 64 * x, []),
                 (16, 3, 128, 128 * x, [])]]

with_global = True

fc_param_name = ('C', 'dropout_rate')
fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
             [(128 * x, 0.0),
              (64 * x, 0.8)]]

sampling = 'random'

optimizer = 'adam'
epsilon = 1e-2

data_dim = 7
use_extra_features = True
with_normal_feature = False
with_X_transformation = True
sorting_method = None
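
One thing I am still unsure about: the commented-out lines in my script (`saver = tf.train.Saver(max_to_keep=None)` / `saver.restore(sess, args.load_ckpt)`) suggest the original validation code restores the checkpoint with a plain Saver built over the freshly constructed graph, whereas I switched to `import_meta_graph`, which imports a second copy of the graph from the `.meta` file. A minimal sketch of that original restore path, in case the difference matters:

    # Sketch: restore the checkpoint weights into the graph built above by
    # variable name, without importing the meta graph a second time.
    saver = tf.train.Saver(max_to_keep=None)
    with tf.Session() as sess:
        sess.run(init_op)
        if args.load_ckpt is not None:
            saver.restore(sess, args.load_ckpt)
            print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt))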

niallomahony93 avatar Jan 01 '20 09:01 niallomahony93

I have sorted it. The code is below; it may contain some unnecessary lines from the training code.


#!/usr/bin/python3
"""Validation On Classification Task."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import math
import random
import shutil
import argparse
import importlib
import data_utils
import numpy as np
import pointfly as pf
import tensorflow as tf
from datetime import datetime

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', '-t', help='Path to data', required=True)
    parser.add_argument('--path_val', '-v', help='Path to validation data')
    parser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load')
    parser.add_argument('--save_folder', '-s', help='Path to folder for saving check points and summary', required=True)
    parser.add_argument('--model', '-m', help='Model to use', required=True)
    parser.add_argument('--setting', '-x', help='Setting to use', required=True)
    
    parser.add_argument('--log', help='Log to FILE in save folder; use - for stdout (default is log.txt)', metavar='FILE', default='log.txt')
    parser.add_argument('--no_timestamp_folder', help='Dont save to timestamp folder', action='store_true')
    parser.add_argument('--no_code_backup', help='Dont backup code', action='store_true')
    args = parser.parse_args()

    if not args.no_timestamp_folder:
        time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        root_folder = os.path.join(args.save_folder, '%s_%s_%s_%d' % (args.model, args.setting, time_string, os.getpid()))
    else:
        root_folder = args.save_folder
    if not os.path.exists(root_folder):
        os.makedirs(root_folder)

    if args.log != '-':
        sys.stdout = open(os.path.join(root_folder, args.log), 'w')

    print('PID:', os.getpid())

    print(args)

#    model_path = os.path.join(os.path.dirname(__file__), args.model)
#    sys.path.append(model_path)
    model = importlib.import_module(args.model)
    setting_path = os.path.join(os.path.dirname(__file__), args.model)
    sys.path.append(setting_path)
    print(setting_path)
    setting = importlib.import_module(args.setting)

    batch_size = setting.batch_size
    sample_num = setting.sample_num
    step_val = setting.step_val
    rotation_range = setting.rotation_range
    rotation_range_val = setting.rotation_range_val
    scaling_range = setting.scaling_range
    scaling_range_val = setting.scaling_range_val
    jitter = setting.jitter
    jitter_val = setting.jitter_val
    pool_setting_val = None if not hasattr(setting, 'pool_setting_val') else setting.pool_setting_val
    
    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data_val, label_val, pc_id_val = setting.load_fn(args.path_val)
    
      
    if setting.save_ply_fn is not None:
        folder = os.path.join(root_folder, 'pts')
        print('{}-Saving samples as .ply files to {}...'.format(datetime.now(), folder))
        sample_num_for_ply = min(512, data_val.shape[0])
        if setting.map_fn is None:
            data_sample = data_val[:sample_num_for_ply]
        else:
            data_sample_list = []
            for idx in range(sample_num_for_ply):
                data_sample_list.append(setting.map_fn(data_val[idx], 0)[0])
            data_sample = np.stack(data_sample_list)
        setting.save_ply_fn(data_sample, folder)

    num_val = data_val.shape[0]
    point_num = data_val.shape[1]
    print('{}-{:d} validation samples.'.format(datetime.now(), num_val))

    ######################################################################
    # Placeholders
    indices = tf.placeholder(tf.int32, shape=(None, None, 2), name="indices")
    xforms = tf.placeholder(tf.float32, shape=(None, 3, 3), name="xforms")
    rotations = tf.placeholder(tf.float32, shape=(None, 3, 3), name="rotations")
    jitter_range = tf.placeholder(tf.float32, shape=(1), name="jitter_range")
    global_step = tf.Variable(0, trainable=False, name='global_step')
    is_training = tf.placeholder(tf.bool, name='is_training')

    
    data_val_placeholder = tf.placeholder(data_val.dtype, data_val.shape, name='data_val')
    label_val_placeholder = tf.placeholder(tf.int64, label_val.shape, name='label_val')
    pc_id_val_placeholder = tf.placeholder(tf.int64, pc_id_val.shape, name='pc_id_val')
    handle = tf.placeholder(tf.string, shape=[], name='handle')

   ######################################################################
    
    dataset_val = tf.data.Dataset.from_tensor_slices((data_val_placeholder, label_val_placeholder, pc_id_val_placeholder))
    if setting.map_fn is not None:
        dataset_val = dataset_val.map(lambda data, label, pc_id: tuple(tf.py_func(
            setting.map_fn, [data, label, pc_id], [tf.float32, label.dtype, pc_id.dtype])), num_parallel_calls=setting.num_parallel_calls)
    if setting.keep_remainder:
        dataset_val = dataset_val.batch(batch_size)
        batch_num_val = math.ceil(num_val / batch_size)
    else:
        dataset_val = dataset_val.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
        batch_num_val = math.floor(num_val / batch_size)
    iterator_val = dataset_val.make_initializable_iterator()
    print('{}-{:d} testing batches per test.'.format(datetime.now(), batch_num_val))

    iterator = tf.data.Iterator.from_string_handle(handle, dataset_val.output_types)
    (pts_fts, labels, pc_ids) = iterator.get_next()

    pts_fts_sampled = tf.gather_nd(pts_fts, indices=indices, name='pts_fts_sampled')
    features_augmented = None
    if setting.data_dim > 3:
        points_sampled, features_sampled = tf.split(pts_fts_sampled,
                                                    [3, setting.data_dim - 3],
                                                    axis=-1,
                                                    name='split_points_features')
        if setting.use_extra_features:
            if setting.with_normal_feature:
                if setting.data_dim < 6:
                    print('Only 3D normals are supported!')
                    exit()
                elif setting.data_dim == 6:
                    features_augmented = pf.augment(features_sampled, rotations)
                else:
                    normals, rest = tf.split(features_sampled, [3, setting.data_dim - 6])
                    normals_augmented = pf.augment(normals, rotations)
                    features_augmented = tf.concat([normals_augmented, rest], axis=-1)
            else:
                features_augmented = features_sampled
    else:
        points_sampled = pts_fts_sampled
    points_augmented = pf.augment(points_sampled, xforms, jitter_range)

    net = model.Net(points=points_augmented, features=features_augmented, is_training=is_training, setting=setting)
    logits = net.logits
    probs_op = tf.nn.softmax(logits, name='probs')
    
    saver = tf.train.Saver()

    parameter_num = np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
    print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))
    
    with open(os.path.join(root_folder,'DOC_prediction_log_pointcnn.txt'), 'w') as log_file:
      log_file.write("Cow ID, Reference,  Inference\n")
    prev_pc_id=0
    
    with tf.Session() as sess:
        #summaries_op = tf.summary.merge_all('train')
#        summaries_val_op = tf.summary.merge_all('val')
#        summary_writer = tf.summary.FileWriter(folder_summary, sess.graph)

        #sess.run(init_op)
    ######################################################################
    
        # Load the model
        if args.load_ckpt is not None:
            #saver.restore(sess, args.load_ckpt)
            saver = tf.train.import_meta_graph(args.load_ckpt+'.meta')
            saver.restore(sess, args.load_ckpt)
            print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt))
        
        #handle_train = sess.run(iterator_train.string_handle())
        handle_val = sess.run(iterator_val.string_handle())

    #    sess.run(iterator_train.initializer, feed_dict={
    #        data_train_placeholder: data_train,
    #        label_train_placeholder: label_train,
    #    })
        sess.run(iterator_val.initializer, feed_dict={
                    data_val_placeholder: data_val,
                    label_val_placeholder: label_val,
                    pc_id_val_placeholder: pc_id_val
                })
        

        for batch_idx_val in range(batch_num_val):
            ######################################################################
            # Validation
                print("Batch Index: "+str(batch_idx_val))
#                sess.run(iterator.get_next(), feed_dict={
#                    data_val_placeholder: data_val,
#                    label_val_placeholder: label_val,
#                })
                #sess.run(init_op)
                #sess.run(reset_metrics_op)
            
                if not setting.keep_remainder \
                        or num_val % batch_size == 0 \
                        or batch_idx_val != batch_num_val - 1:
                    batch_size_val = batch_size
                else:
                    batch_size_val = num_val % batch_size
                xforms_np, rotations_np = pf.get_xforms(batch_size_val,
                                                        rotation_range=rotation_range_val,
                                                        scaling_range=scaling_range_val,
                                                        order=setting.rotation_order)
#                sess.run([loss_mean_update_op, t_1_acc_update_op, t_1_per_class_acc_update_op],
#                             feed_dict={
#                                 handle: handle_val,
#                                 indices: pf.get_indices(batch_size_val, sample_num, point_num,
#                                                         ),
#                                 xforms: xforms_np,
#                                 rotations: rotations_np,
#                                 jitter_range: np.array([jitter_val]),
#                                 is_training: False,
#                             })
#                loss_val, t_1_acc_val, t_1_per_class_acc_val, summaries_val, step = sess.run(
#                    [loss_mean_op, t_1_acc_op, t_1_per_class_acc_op, summaries_val_op, global_step])
#                summary_writer.add_summary(summaries_val, step)
#                print('{}-[Val  ]-Average:      Loss: {:.4f}  T-1 Acc: {:.4f}  T-1 mAcc: {:.4f}'
#                      .format(datetime.now(), loss_val, t_1_acc_val, t_1_per_class_acc_val))
#                
                print('Prediction:        Labels: ')
                probs, label, pc_id=sess.run([probs_op, labels, pc_ids], feed_dict={
                             handle: handle_val,
                             indices: pf.get_indices(batch_size_val, sample_num, point_num,
                                                     ),
                             xforms: xforms_np,
                             rotations: rotations_np,
                             jitter_range: np.array([jitter_val]),
                             is_training: False,
                         })
                pred = np.argmax(probs)         
                print(pred)
                labels_2d = np.expand_dims(label, axis=-1)
                
                print(labels_2d)
                with open(os.path.join(root_folder,'DOC_prediction_log_pointcnn.txt'), 'a') as log_file:
                    log_file.seek(0, os.SEEK_END)
                    if pc_id[0] != prev_pc_id:
                        log_file.write('\n')
                        log_file.write(str(pc_id[0]) + ',' + str(labels_2d[0][0]) + ',')
                    log_file.write(str(pred) + ',')
                prev_pc_id = pc_id[0]
                sys.stdout.flush()
            ######################################################################

            ######################################################################
            ######################################################################
        print('{}-Done!'.format(datetime.now()))

if __name__ == '__main__':
    main()

niallomahony93 avatar Jan 08 '20 09:01 niallomahony93

Hi, what is the meaning of pc_id? It seems that load_cls only has two return values.
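
I am guessing load_cls was modified to return an id per cloud as a third value. A rough sketch of what such a variant might look like, purely hypothetical: it assumes each .h5 file carries an extra 'id' dataset alongside 'data' and 'label', and the function name is made up:

    import os
    import h5py
    import numpy as np

    def load_cls_with_ids(filelist):
        """Hypothetical variant of data_utils.load_cls that also returns a per-cloud id."""
        points, labels, ids = [], [], []
        folder = os.path.dirname(filelist)
        for line in open(filelist):
            filename = os.path.join(folder, line.strip())
            with h5py.File(filename, 'r') as f:
                points.append(f['data'][...].astype(np.float32))
                labels.append(f['label'][...].astype(np.int64))
                ids.append(f['id'][...].astype(np.int64))  # assumed extra dataset
        return (np.concatenate(points, axis=0),
                np.concatenate(labels, axis=0),
                np.concatenate(ids, axis=0))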

kevin-steiner avatar Apr 11 '20 03:04 kevin-steiner