Point cloud -> Range Image -> Point Cloud fails
Hey guys,
I'm trying to reconvert an interpolated range image back to a 3D point cloud with labels, but I fail. The inspiration came from this paper: https://www.mdpi.com/1424-8220/23/1/322.
The conversion of the 3D point cloud to a range image is correct, but the reconversion does not seem to be.
Original scene:
Interpolated scene (approach):
The new scene seems somehow curved and not aligned with the gt-boxes, like in the paper.
Paper results and expected output:
Thanks for any help!
What actually is your question? Would you mind sharing your code to see what the community could help you with?
@brmarkus Hey thank you for the quick answer.
My question is why I can't get a ground plane aligned pc again (image (c) in paper results). I have a curve over the x-axis (see interpolated scene).
Code (minimal Example):
# --- Load validation data ---------------------------------------------------
with open(val_filename, 'rb') as f:
    data_list = pickle.load(f)

points = data_list[5]['points']        # (N, 5): x, y, z, intensity, beam id
points_xyz_int = points[:, :4]         # drop the beam-id column
beam_label = points[:, -1].astype(int)

augmentor = DataAugmentor(
    root_path=data_path,
    augmentor_configs=[],
    class_names=class_names,
    logger=common_utils.create_logger(),
)

# KITTI scanning parameters, obtained from Hough transformation (source:
# https://github.com/tusen-ai/RangeDet/blob/5078ce339c6d27a009aed1ca2790911ce4d10bc7/datasets/create_range_image_in_kitti.py#L210)
height = np.loadtxt("/home/rlab10/OpenPCDet/data/kitti/training/kitti scanning parameters/height.txt")
zenith = np.loadtxt("/home/rlab10/OpenPCDet/data/kitti/training/kitti scanning parameters/zenith.txt")
incl = np.loadtxt("/home/rlab10/OpenPCDet/data/kitti/training/kitti scanning parameters/incl.txt")

# 1. Point cloud -> low-resolution 2D range image
range_image = augmentor_utils.get_range_image_hdl64e(points_xyz_int, incl=incl, height=height)

# 2. Upsampling/interpolation on the range image
#    (source: https://www.mdpi.com/1424-8220/23/1/322)
range_image_upsampled_ = augmentor.pixel_distance_range_weighted_interpolation(range_image, MAX_RANGE=70.0)
points_result = augmentor_utils.range_image_to_cartesian(range_image=range_image_upsampled_, incl=incl[::-1], beam_label=True)

# --- Visualize the reconstructed cloud, colour-coded by height ---------------
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
xs, ys, zs = points_result[:, 0], points_result[:, 1], points_result[:, 2]
ax.scatter(xs, ys, zs, s=0.1, c=zs, cmap='plasma')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('3D Point Cloud (points_result)')
plt.show()
Functions:
# source: https://github.com/tusen-ai/RangeDet/blob/1df87b2d9aa9ef3f77ad634c2656b96867c7eac8/datasets/create_range_image_in_kitti.py#L107
def get_range_image_hdl64e(points, incl, height, width=2048):
    """Project a LiDAR point cloud onto an HDL-64E-style range image.

    Every point is assigned to the beam (row) whose calibrated inclination
    best explains the point's observed vertical angle, and to a column
    derived from its azimuth.  When several points fall into the same pixel
    the closest one wins: points are written in order of decreasing range,
    so nearer points overwrite farther ones.

    Args:
        points: (N, 4) array of [x, y, z, intensity] in the sensor frame.
        incl:   (B,) per-beam inclination angles in radians.
        height: (B,) per-beam mounting heights of the laser origins.
        width:  columns per revolution (data sheet: 2083, rounded to 2048).

    Returns:
        (B, width, 5) float32 array with channels
        [range, x, y, z, intensity]; empty pixels hold -1.
    """
    xy_norm = np.linalg.norm(points[:, :2], ord=2, axis=1)
    # Vectorized row assignment: broadcast (N, 1) point angles against the
    # (B,) beam inclinations instead of looping over beams in Python.
    vert_angle = np.arctan2(height[None, :] - points[:, 2:3], xy_norm[:, None])
    all_error = np.abs(incl[None, :] - vert_angle)          # (N, B)
    row_inds = np.argmin(all_error, axis=-1)

    azi = np.arctan2(points[:, 1], points[:, 0])
    # Columns run right to left: azimuth +pi maps to column 0 and
    # azimuth -pi to the last column.
    col_inds = width - 1.0 + 0.5 - (azi + np.pi) / (2.0 * np.pi) * width
    col_inds = np.round(col_inds).astype(np.int32)
    col_inds[col_inds == width] = width - 1
    col_inds[col_inds < 0] = 0

    # Ch 0: range, Ch 1: x, Ch 2: y, Ch 3: z, Ch 4: intensity.
    # Row count follows len(incl) (the original hard-coded 64, which breaks
    # for any other beam calibration).
    empty_range_image = np.full((len(incl), width, 5), -1, dtype=np.float32)
    point_range = np.linalg.norm(points[:, :3], axis=1, ord=2)
    # Sort by decreasing range so the nearest point in a pixel is written last.
    order = np.argsort(-point_range)
    point_range = point_range[order]
    points = points[order]
    row_inds = row_inds[order]
    col_inds = col_inds[order]
    empty_range_image[row_inds, col_inds, :] = np.concatenate(
        [point_range[:, None], points], axis=1)
    return empty_range_image
def range_image_to_cartesian(range_image, incl, azimuth_min=-np.pi, azimuth_max=np.pi, beam_label=False):
    """Back-project a range image into a cartesian point cloud.

    Args:
        range_image: (H, W, C) array; channel 0 is range, channel 4 (if
            present) is intensity.  Empty pixels are expected to be <= 0.
        incl: (H,) per-row inclination angles in radians, measured from the
            xy-plane.
        azimuth_min, azimuth_max: azimuth span of the W columns.
        beam_label: if True, append the row index of each point as a fifth
            output column (one laser per image row).

    Returns:
        (M, 4) array [x, y, z, intensity], or (M, 5) with the beam id
        appended when ``beam_label`` is True.
    """
    H, W, C = range_image.shape  # e.g. (64, 2048, 5)
    # NOTE(review): get_range_image_hdl64e fills columns right-to-left
    # (azimuth +pi at column 0), while this increasing linspace maps column 0
    # to azimuth_min — confirm the intended column->azimuth direction.
    azimuths = np.linspace(azimuth_min, azimuth_max, W, endpoint=False)
    azimuth_grid, incl_grid = np.meshgrid(azimuths, incl)
    ranges = range_image[:, :, 0]
    intensity = range_image[:, :, 4] if C > 4 else np.zeros_like(ranges)
    # Empty pixels are encoded as -1 by the encoder; keep only real returns.
    mask = ranges > 0
    r = ranges[mask]
    azimuth = azimuth_grid[mask]
    inclination = incl_grid[mask]
    intensity = intensity[mask]
    # Spherical -> cartesian, treating the inclination as elevation above the
    # xy-plane.  NOTE(review): the encoder defined the beam angle via
    # arctan2(height - z, xy_norm), so z here ignores the per-beam mounting
    # height and the angle's sign convention — likely the source of the
    # "curved / upside-down" reconstruction; verify against the encoder.
    x = r * np.cos(inclination) * np.cos(azimuth)
    y = r * np.cos(inclination) * np.sin(azimuth)
    z = r * np.sin(inclination)
    if beam_label:
        # Row index doubles as the beam id (named distinctly so the boolean
        # parameter is not shadowed by an ndarray).
        beam_ids = np.repeat(np.arange(H)[:, None], W, axis=1)[mask]
        return np.stack([x, y, z, intensity, beam_ids], axis=-1)
    return np.stack([x, y, z, intensity], axis=-1)
With `incl=incl` in `range_image_to_cartesian()`, the scene is upside down and objects point downwards:
With `incl=incl[::-1]` in `range_image_to_cartesian()`, the scene is curved, but objects are in the correct position: