Compare commits
34 Commits
ab_local_o ... d7fb64ed13
d7fb64ed13
5a03659112
fca984e76b
dec67e8255
1535a48a3f
9c2625b11e
2dfb6c57ce
88d44f020e
34548c64a3
47ea0ac434
91cabec977
445e9dc00b
6ce3760471
47624f12cf
501975457f
155b655938
2c8ef20321
493639287e
6a608ea74b
6f427785b3
5bcd0fc6e3
2b7243d1be
04d3a359e1
287983277a
982a3b9b60
ecd4cfa806
985a08d89c
b221036e8b
097712c0ea
a954ed0998
f5f8e4266f
8a05b7883d
e23697eb87
2487039445
@@ -1,5 +1,6 @@
 from PytorchBoot.application import PytorchBootApplication
 from runners.inferencer import Inferencer
+from runners.inference_server import InferencerServer

 @PytorchBootApplication("inference")
 class InferenceApp:
@@ -14,3 +15,17 @@ class InferenceApp:
         Evaluator("path_to_your_eval_config").run()
         '''
         Inferencer("./configs/local/inference_config.yaml").run()
+
+@PytorchBootApplication("server")
+class InferenceServerApp:
+    @staticmethod
+    def start():
+        '''
+        call default or your custom runners here, code will be executed
+        automatically when type "pytorch-boot run" or "ptb run" in terminal
+
+        example:
+            Trainer("path_to_your_train_config").run()
+            Evaluator("path_to_your_eval_config").run()
+        '''
+        InferencerServer("./configs/server/server_inference_server_config.yaml").run()
beans/predict_result.py (new file, 162 lines)
@@ -0,0 +1,162 @@
import numpy as np
from sklearn.cluster import DBSCAN

class PredictResult:
    def __init__(self, raw_predict_result, input_pts=None, cluster_params=dict(eps=0.5, min_samples=2)):
        self.input_pts = input_pts
        self.cluster_params = cluster_params
        self.sampled_9d_pose = raw_predict_result
        self.sampled_matrix_pose = self.get_sampled_matrix_pose()
        self.distance_matrix = self.calculate_distance_matrix()
        self.clusters = self.get_cluster_result()
        self.candidate_matrix_poses = self.get_candidate_poses()
        self.candidate_9d_poses = [np.concatenate((self.matrix_to_rotation_6d_numpy(matrix[:3, :3]), matrix[:3, 3].reshape(-1,)), axis=-1) for matrix in self.candidate_matrix_poses]
        self.cluster_num = len(self.clusters)

    @staticmethod
    def rotation_6d_to_matrix_numpy(d6):
        a1, a2 = d6[:3], d6[3:]
        b1 = a1 / np.linalg.norm(a1)
        b2 = a2 - np.dot(b1, a2) * b1
        b2 = b2 / np.linalg.norm(b2)
        b3 = np.cross(b1, b2)
        return np.stack((b1, b2, b3), axis=-2)

    @staticmethod
    def matrix_to_rotation_6d_numpy(matrix):
        return np.copy(matrix[:2, :]).reshape((6,))

    def __str__(self):
        info = "Predict Result:\n"
        info += f"  Predicted pose number: {len(self.sampled_9d_pose)}\n"
        info += f"  Cluster number: {self.cluster_num}\n"
        for i, cluster in enumerate(self.clusters):
            info += f"  - Cluster {i} size: {len(cluster)}\n"
        max_distance = np.max(self.distance_matrix[self.distance_matrix != 0])
        min_distance = np.min(self.distance_matrix[self.distance_matrix != 0])
        info += f"  Max distance: {max_distance}\n"
        info += f"  Min distance: {min_distance}\n"
        return info

    def get_sampled_matrix_pose(self):
        sampled_matrix_pose = []
        for pose in self.sampled_9d_pose:
            rotation = pose[:6]
            translation = pose[6:]
            pose = self.rotation_6d_to_matrix_numpy(rotation)
            pose = np.concatenate((pose, translation.reshape(-1, 1)), axis=-1)
            pose = np.concatenate((pose, np.array([[0, 0, 0, 1]])), axis=-2)
            sampled_matrix_pose.append(pose)
        return np.array(sampled_matrix_pose)

    def rotation_distance(self, R1, R2):
        R = np.dot(R1.T, R2)
        trace = np.trace(R)
        angle = np.arccos(np.clip((trace - 1) / 2, -1, 1))
        return angle

    def calculate_distance_matrix(self):
        n = len(self.sampled_matrix_pose)
        dist_matrix = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                dist_matrix[i, j] = self.rotation_distance(self.sampled_matrix_pose[i][:3, :3], self.sampled_matrix_pose[j][:3, :3])
        return dist_matrix

    def cluster_rotations(self):
        clustering = DBSCAN(eps=self.cluster_params['eps'], min_samples=self.cluster_params['min_samples'], metric='precomputed')
        labels = clustering.fit_predict(self.distance_matrix)
        return labels

    def get_cluster_result(self):
        labels = self.cluster_rotations()
        cluster_num = len(set(labels)) - (1 if -1 in labels else 0)
        clusters = []
        for _ in range(cluster_num):
            clusters.append([])
        for matrix_pose, label in zip(self.sampled_matrix_pose, labels):
            if label != -1:
                clusters[label].append(matrix_pose)
        clusters.sort(key=len, reverse=True)
        return clusters

    def get_center_matrix_pose_from_cluster(self, cluster):
        min_total_distance = float('inf')
        center_matrix_pose = None

        for matrix_pose in cluster:
            total_distance = 0
            for other_matrix_pose in cluster:
                rot_distance = self.rotation_distance(matrix_pose[:3, :3], other_matrix_pose[:3, :3])
                total_distance += rot_distance

            if total_distance < min_total_distance:
                min_total_distance = total_distance
                center_matrix_pose = matrix_pose

        return center_matrix_pose

    def get_candidate_poses(self):
        candidate_poses = []
        for cluster in self.clusters:
            candidate_poses.append(self.get_center_matrix_pose_from_cluster(cluster))
        return candidate_poses

    def visualize(self):
        import plotly.graph_objects as go
        fig = go.Figure()
        if self.input_pts is not None:
            fig.add_trace(go.Scatter3d(
                x=self.input_pts[:, 0], y=self.input_pts[:, 1], z=self.input_pts[:, 2],
                mode='markers', marker=dict(size=1, color='gray', opacity=0.5), name='Input Points'
            ))
        colors = ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
                  'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg']
        for i, cluster in enumerate(self.clusters):
            color = colors[i]
            candidate_pose = self.candidate_matrix_poses[i]
            origin_candidate = candidate_pose[:3, 3]
            z_axis_candidate = candidate_pose[:3, 2]
            for pose in cluster:
                origin = pose[:3, 3]
                z_axis = pose[:3, 2]
                fig.add_trace(go.Cone(
                    x=[origin[0]], y=[origin[1]], z=[origin[2]],
                    u=[z_axis[0]], v=[z_axis[1]], w=[z_axis[2]],
                    colorscale=color,
                    sizemode="absolute", sizeref=0.05, anchor="tail", showscale=False
                ))
            fig.add_trace(go.Cone(
                x=[origin_candidate[0]], y=[origin_candidate[1]], z=[origin_candidate[2]],
                u=[z_axis_candidate[0]], v=[z_axis_candidate[1]], w=[z_axis_candidate[2]],
                colorscale=color,
                sizemode="absolute", sizeref=0.1, anchor="tail", showscale=False
            ))

        fig.update_layout(
            title="Clustered Poses and Input Points",
            scene=dict(
                xaxis_title='X',
                yaxis_title='Y',
                zaxis_title='Z'
            ),
            margin=dict(l=0, r=0, b=0, t=40),
            scene_camera=dict(eye=dict(x=1.25, y=1.25, z=1.25))
        )

        fig.show()


if __name__ == "__main__":
    step = 0
    raw_predict_result = np.load(f"inference_result_pack/inference_result_pack/{step}/all_pred_pose_9d.npy")
    input_pts = np.loadtxt(f"inference_result_pack/inference_result_pack/{step}/input_pts.txt")
    print(raw_predict_result.shape)
    predict_result = PredictResult(raw_predict_result, input_pts, cluster_params=dict(eps=0.25, min_samples=3))
    print(predict_result)
    print(len(predict_result.candidate_matrix_poses))
    print(predict_result.distance_matrix)
    #import ipdb; ipdb.set_trace()
    predict_result.visualize()
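A minimal, self-contained usage sketch for PredictResult (the synthetic poses are an illustrative assumption, not repository data; the module path beans.predict_result follows the file path above). It builds two tight groups of 9D poses (6D rotation + 3D translation) plus one stray pose, and lets DBSCAN group them by the geodesic rotation distance arccos((trace(R1^T R2) - 1) / 2) computed in rotation_distance:

import numpy as np
from beans.predict_result import PredictResult

rng = np.random.default_rng(0)

def random_9d_pose():
    # Any two non-parallel 3-vectors form a valid 6D rotation representation;
    # PredictResult orthonormalizes them via Gram-Schmidt.
    return np.concatenate([rng.normal(size=6), rng.normal(size=3) * 0.1])

base_a, base_b = random_9d_pose(), random_9d_pose()
poses = np.stack(
    [base_a + rng.normal(scale=1e-3, size=9) for _ in range(4)]
    + [base_b + rng.normal(scale=1e-3, size=9) for _ in range(3)]
    + [random_9d_pose()]  # likely DBSCAN noise (label -1)
)

result = PredictResult(poses, input_pts=None, cluster_params=dict(eps=0.25, min_samples=2))
print(result)                        # cluster sizes plus min/max rotation distance
print(result.cluster_num)            # likely 2: the stray pose is dropped as noise
print(result.candidate_9d_poses[0])  # medoid pose of the largest cluster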
@@ -1,76 +1,76 @@

 runner:
   general:
-    seed: 1
+    seed: 0
     device: cuda
     cuda_visible_devices: "0,1,2,3,4,5,6,7"

   experiment:
-    name: w_gf_wo_lf_full
+    name: train_ab_global_only_dense
     root_dir: "experiments"
-    epoch: 1 # -1 stands for last epoch
+    epoch: 441 # -1 stands for last epoch

   test:
     dataset_list:
-      - OmniObject3d_train
+      - OmniObject3d_test

-    blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py"
-    output_dir: "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/test/inference_global_full_on_testset"
-    pipeline: nbv_reconstruction_global_pts_pipeline
+    output_dir: "/media/hofee/data/data/p++_dense"
+    pipeline: nbv_reconstruction_pipeline
+    voxel_size: 0.003
+    min_new_area: 1.0

 dataset:
-  OmniObject3d_train:
-    root_dir: "/media/hofee/repository/nbv_reconstruction_data_512"
+  # OmniObject3d_train:
+  #   root_dir: "C:\\Document\\Datasets\\inference_test1"
+  #   model_dir: "C:\\Document\\Datasets\\scaled_object_meshes"
+  #   source: seq_reconstruction_dataset_preprocessed
+  #   split_file: "C:\\Document\\Datasets\\data_list\\sample.txt"
+  #   type: test
+  #   filter_degree: 75
+  #   ratio: 1
+  #   batch_size: 1
+  #   num_workers: 12
+  #   pts_num: 8192
+  #   load_from_preprocess: True

   OmniObject3d_test:
     root_dir: "/media/hofee/data/data/new_testset_output"
     model_dir: "/media/hofee/data/data/scaled_object_meshes"
-    source: seq_nbv_reconstruction_dataset
-    split_file: "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/test/test_set_list.txt"
+    source: seq_reconstruction_dataset_preprocessed
+    # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt"
     type: test
     filter_degree: 75
-    ratio: 1
     eval_list:
       - pose_diff
       - coverage_rate_increase
+    ratio: 0.1
     batch_size: 1
     num_workers: 12
-    pts_num: 4096
-    load_from_preprocess: False
+    pts_num: 8192
+    load_from_preprocess: True

 pipeline:
-  nbv_reconstruction_local_pts_pipeline:
+  nbv_reconstruction_pipeline:
     modules:
-      pts_encoder: pointnet_encoder
+      pts_encoder: pointnet++_encoder
       seq_encoder: transformer_seq_encoder
       pose_encoder: pose_encoder
       view_finder: gf_view_finder
     eps: 1e-5
     global_scanned_feat: False

-  nbv_reconstruction_global_pts_pipeline:
-    modules:
-      pts_encoder: pointnet_encoder
-      pose_seq_encoder: transformer_pose_seq_encoder
-      pose_encoder: pose_encoder
-      view_finder: gf_view_finder
-      eps: 1e-5
-      global_scanned_feat: True


 module:
+  pointnet++_encoder:
+    in_dim: 3
+    params_name: light

   pointnet_encoder:
     in_dim: 3
     out_dim: 1024
     global_feat: True
     feature_transform: False

   transformer_seq_encoder:
-    pts_embed_dim: 1024
-    pose_embed_dim: 256
     num_heads: 4
     ffn_dim: 256
     num_layers: 3
     output_dim: 2048

-  transformer_pose_seq_encoder:
-    pose_embed_dim: 256
-    embed_dim: 256
-    num_heads: 4
-    ffn_dim: 256
-    num_layers: 3
@@ -86,7 +86,8 @@ module:
     sample_mode: ode
     sampling_steps: 500
     sde_mode: ve
-
   pose_encoder:
     pose_dim: 9
     out_dim: 256
+  pts_num_encoder:
+    out_dim: 64
|
@@ -22,6 +22,6 @@ runner:
|
||||
|
||||
datasets:
|
||||
OmniObject3d:
|
||||
root_dir: /data/hofee/nbv_rec_part2_preprocessed
|
||||
from: 155
|
||||
to: 165 # ..-1 means end
|
||||
root_dir: /media/hofee/data/data/test_bottle/view
|
||||
from: 0
|
||||
to: -1 # ..-1 means end
|
||||
|
@@ -8,11 +8,11 @@ runner:
   root_dir: experiments
   generate:
     port: 5002
-    from: 600
-    to: -1 # -1 means all
-    object_dir: /media/hofee/data/data/object_meshes_part1
-    table_model_path: "/media/hofee/data/data/others/table.obj"
-    output_dir: /media/hofee/repository/data_part_1
+    from: 0
+    to: 50 # -1 means all
+    object_dir: /media/hofee/data/data/test_bottle/bottle_mesh
+    table_model_path: /media/hofee/data/data/others/table.obj
+    output_dir: /media/hofee/data/data/test_bottle/view
     binocular_vision: true
     plane_size: 10
     max_views: 512
@@ -34,7 +34,7 @@ runner:
     max_y: 0.05
     min_z: 0.01
     max_z: 0.01
-    random_rotation_ratio: 0.3
+    random_rotation_ratio: 0.0
     random_objects:
       num: 4
       cluster: 0.9
configs/server/server_inference_server_config.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@

runner:
  general:
    seed: 0
    device: cuda
    cuda_visible_devices: "0,1,2,3,4,5,6,7"

  experiment:
    name: train_ab_global_only
    root_dir: "experiments"
    epoch: -1 # -1 stands for last epoch

  pipeline: nbv_reconstruction_pipeline
  voxel_size: 0.003

pipeline:
  nbv_reconstruction_pipeline:
    modules:
      pts_encoder: pointnet_encoder
      seq_encoder: transformer_seq_encoder
      pose_encoder: pose_encoder
      view_finder: gf_view_finder
    eps: 1e-5
    global_scanned_feat: True

module:
  pointnet_encoder:
    in_dim: 3
    out_dim: 1024
    global_feat: True
    feature_transform: False
  transformer_seq_encoder:
    embed_dim: 256
    num_heads: 4
    ffn_dim: 256
    num_layers: 3
    output_dim: 1024

  gf_view_finder:
    t_feat_dim: 128
    pose_feat_dim: 256
    main_feat_dim: 2048
    regression_head: Rx_Ry_and_T
    pose_mode: rot_matrix
    per_point_feature: False
    sample_mode: ode
    sampling_steps: 500
    sde_mode: ve
  pose_encoder:
    pose_dim: 9
    out_dim: 256
  pts_num_encoder:
    out_dim: 64
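For orientation, a minimal sketch of how this config is consumed, based on the application diff at the top of this compare view (the entry-point name "server" comes from the @PytorchBootApplication("server") decorator there; launching through the PytorchBoot CLI is as described in its docstring):

# Assumed wiring, mirroring the InferenceServerApp added in this compare view;
# the runner reads the experiment/pipeline/module sections of the YAML above.
from runners.inference_server import InferencerServer

if __name__ == "__main__":
    # Equivalent to launching the @PytorchBootApplication("server") app
    # with `pytorch-boot run` / `ptb run` from the terminal.
    InferencerServer("./configs/server/server_inference_server_config.yaml").run()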
@@ -3,11 +3,11 @@ runner:
   general:
     seed: 0
     device: cuda
-    cuda_visible_devices: "1"
+    cuda_visible_devices: "0"
     parallel: False

   experiment:
-    name: debug
+    name: train_ab_global_only_with_wp_p++_strong
     root_dir: "experiments"
     use_checkpoint: False
     epoch: -1 # -1 stands for last epoch
@@ -25,87 +25,68 @@ runner:
   test:
     frequency: 3 # test frequency
     dataset_list:
-      #- OmniObject3d_test
+      - OmniObject3d_test
      - OmniObject3d_val

-  pipeline: nbv_reconstruction_global_pts_n_num_pipeline
+  pipeline: nbv_reconstruction_pipeline

 dataset:
   OmniObject3d_train:
-    root_dir: "/home/data/hofee/project/nbv_rec/data/sample_for_training_new"
+    root_dir: "/data/hofee/data/new_full_data"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "/home/data/hofee/project/nbv_rec/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt"
     type: train
     cache: True
     ratio: 1
-    batch_size: 160
-    num_workers: 16
+    batch_size: 64
+    num_workers: 128
     pts_num: 8192
     load_from_preprocess: True

   OmniObject3d_test:
-    root_dir: "/home/data/hofee/project/nbv_rec/data/sample_for_training_new"
+    root_dir: "/data/hofee/data/new_full_data"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "/home/data/hofee/project/nbv_rec/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_test.txt"
     type: test
     cache: True
     filter_degree: 75
     eval_list:
       - pose_diff
-    ratio: 0.05
-    batch_size: 160
+    ratio: 1
+    batch_size: 80
     num_workers: 12
     pts_num: 8192
     load_from_preprocess: True

   OmniObject3d_val:
-    root_dir: "/home/data/hofee/project/nbv_rec/data/sample_for_training_new"
+    root_dir: "/data/hofee/data/new_full_data"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "/home/data/hofee/project/nbv_rec/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt"
     type: test
     cache: True
     filter_degree: 75
     eval_list:
       - pose_diff
-    ratio: 0.005
-    batch_size: 160
+    ratio: 0.1
+    batch_size: 80
     num_workers: 12
     pts_num: 8192
     load_from_preprocess: True


 pipeline:
-  nbv_reconstruction_local_pts_pipeline:
+  nbv_reconstruction_pipeline:
     modules:
-      pts_encoder: pointnet_encoder
+      pts_encoder: pointnet++_encoder
       seq_encoder: transformer_seq_encoder
       pose_encoder: pose_encoder
       view_finder: gf_view_finder
     eps: 1e-5
     global_scanned_feat: True

-  nbv_reconstruction_global_pts_pipeline:
-    modules:
-      pts_encoder: pointnet_encoder
-      pose_seq_encoder: transformer_seq_encoder
-      pose_encoder: pose_encoder
-      view_finder: gf_view_finder
-    eps: 1e-5
-    global_scanned_feat: True
-
-  nbv_reconstruction_global_pts_n_num_pipeline:
-    modules:
-      pts_encoder: pointnet_encoder
-      transformer_seq_encoder: transformer_seq_encoder
-      pose_encoder: pose_encoder
-      view_finder: gf_view_finder
-      pts_num_encoder: pts_num_encoder
-      eps: 1e-5
-      global_scanned_feat: True


 module:
@@ -115,6 +96,10 @@ module:
     global_feat: True
     feature_transform: False

+  pointnet++_encoder:
+    in_dim: 3
+    params_name: strong
+
   transformer_seq_encoder:
     embed_dim: 256
     num_heads: 4
@@ -125,7 +110,7 @@ module:
   gf_view_finder:
     t_feat_dim: 128
     pose_feat_dim: 256
-    main_feat_dim: 2048
+    main_feat_dim: 5120
     regression_head: Rx_Ry_and_T
     pose_mode: rot_matrix
     per_point_feature: False
@@ -4,6 +4,7 @@ import PytorchBoot.namespace as namespace
 import PytorchBoot.stereotype as stereotype
 from PytorchBoot.config import ConfigManager
+from PytorchBoot.utils.log_util import Log

 import torch
 import os
 import sys
@@ -34,7 +35,7 @@ class NBVReconstructionDataset(BaseDataset):
         #self.model_dir = config["model_dir"]
         self.filter_degree = config["filter_degree"]
         if self.type == namespace.Mode.TRAIN:
-            scale_ratio = 100
+            scale_ratio = 1
             self.datalist = self.datalist*scale_ratio
         if self.cache:
             expr_root = ConfigManager.get("runner", "experiment", "root_dir")
@@ -50,7 +51,7 @@ class NBVReconstructionDataset(BaseDataset):
                 scene_name_list.append(scene_name)
         return scene_name_list

-    def get_datalist(self):
+    def get_datalist(self, bias=False):
         datalist = []
         for scene_name in self.scene_name_list:
             seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
@@ -79,6 +80,8 @@ class NBVReconstructionDataset(BaseDataset):
             for data_pair in label_data["data_pairs"]:
                 scanned_views = data_pair[0]
                 next_best_view = data_pair[1]
+                accept_probability = scanned_views[-1][1]
+                if accept_probability > np.random.rand():
                 datalist.append(
                     {
                         "scanned_views": scanned_views,
@@ -206,14 +209,9 @@ class NBVReconstructionDataset(BaseDataset):
             collate_data["combined_scanned_pts"] = torch.stack(
                 [torch.tensor(item["combined_scanned_pts"]) for item in batch]
             )
-            collate_data["scanned_pts_mask"] = torch.stack(
-                [torch.tensor(item["scanned_pts_mask"]) for item in batch]
-            )
-
             for key in batch[0].keys():
                 if key not in [
                     "scanned_pts",
-                    "scanned_pts_mask",
                     "scanned_n_to_world_pose_9d",
                     "best_to_world_pose_9d",
                     "combined_scanned_pts",
@@ -232,9 +230,10 @@ if __name__ == "__main__":
     torch.manual_seed(seed)
     np.random.seed(seed)
     config = {
-        "root_dir": "/data/hofee/data/packed_preprocessed_data",
+        "root_dir": "/data/hofee/data/new_full_data",
         "model_dir": "../data/scaled_object_meshes",
         "source": "nbv_reconstruction_dataset",
-        "split_file": "/data/hofee/data/OmniObject3d_train.txt",
+        "split_file": "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt",
         "load_from_preprocess": True,
+        "ratio": 0.5,
         "batch_size": 2,
core/old_seq_dataset.py (new file, 154 lines)
@@ -0,0 +1,154 @@
import numpy as np
from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.utils.log_util import Log
import torch
import os
import sys
sys.path.append(r"/home/data/hofee/project/nbv_rec/nbv_reconstruction")

from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil

@stereotype.dataset("old_seq_nbv_reconstruction_dataset")
class SeqNBVReconstructionDataset(BaseDataset):
    def __init__(self, config):
        super(SeqNBVReconstructionDataset, self).__init__(config)
        self.type = config["type"]
        if self.type != namespace.Mode.TEST:
            Log.error("Dataset <seq_nbv_reconstruction_dataset> Only support test mode", terminate=True)
        self.config = config
        self.root_dir = config["root_dir"]
        self.split_file_path = config["split_file"]
        self.scene_name_list = self.load_scene_name_list()
        self.datalist = self.get_datalist()
        self.pts_num = config["pts_num"]

        self.model_dir = config["model_dir"]
        self.filter_degree = config["filter_degree"]
        self.load_from_preprocess = config.get("load_from_preprocess", False)


    def load_scene_name_list(self):
        scene_name_list = []
        with open(self.split_file_path, "r") as f:
            for line in f:
                scene_name = line.strip()
                scene_name_list.append(scene_name)
        return scene_name_list

    def get_datalist(self):
        datalist = []
        for scene_name in self.scene_name_list:
            seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
            scene_max_coverage_rate = 0
            scene_max_cr_idx = 0

            for seq_idx in range(seq_num):
                label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, seq_idx)
                label_data = DataLoadUtil.load_label(label_path)
                max_coverage_rate = label_data["max_coverage_rate"]
                if max_coverage_rate > scene_max_coverage_rate:
                    scene_max_coverage_rate = max_coverage_rate
                    scene_max_cr_idx = seq_idx

            label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, scene_max_cr_idx)
            label_data = DataLoadUtil.load_label(label_path)
            first_frame = label_data["best_sequence"][0]
            best_seq_len = len(label_data["best_sequence"])
            datalist.append({
                "scene_name": scene_name,
                "first_frame": first_frame,
                "max_coverage_rate": scene_max_coverage_rate,
                "best_seq_len": best_seq_len,
                "label_idx": scene_max_cr_idx,
            })
        return datalist

    def __getitem__(self, index):
        data_item_info = self.datalist[index]
        first_frame_idx = data_item_info["first_frame"][0]
        first_frame_coverage = data_item_info["first_frame"][1]
        max_coverage_rate = data_item_info["max_coverage_rate"]
        scene_name = data_item_info["scene_name"]
        first_cam_info = DataLoadUtil.load_cam_info(DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx), binocular=True)
        first_view_path = DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx)
        first_left_cam_pose = first_cam_info["cam_to_world"]
        first_center_cam_pose = first_cam_info["cam_to_world_O"]
        first_target_point_cloud = DataLoadUtil.load_from_preprocessed_pts(first_view_path)
        first_pts_num = first_target_point_cloud.shape[0]
        first_downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(first_target_point_cloud, self.pts_num)
        first_to_world_rot_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(first_left_cam_pose[:3,:3]))
        first_to_world_trans = first_left_cam_pose[:3,3]
        first_to_world_9d = np.concatenate([first_to_world_rot_6d, first_to_world_trans], axis=0)
        diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
        voxel_threshold = diag*0.02
        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
        scene_path = os.path.join(self.root_dir, scene_name)
        model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)

        data_item = {
            "first_pts_num": np.asarray(
                first_pts_num, dtype=np.int32
            ),
            "first_pts": np.asarray([first_downsampled_target_point_cloud],dtype=np.float32),
            "combined_scanned_pts": np.asarray(first_downsampled_target_point_cloud,dtype=np.float32),
            "first_to_world_9d": np.asarray([first_to_world_9d],dtype=np.float32),
            "scene_name": scene_name,
            "max_coverage_rate": max_coverage_rate,
            "voxel_threshold": voxel_threshold,
            "filter_degree": self.filter_degree,
            "O_to_L_pose": first_O_to_first_L_pose,
            "first_frame_coverage": first_frame_coverage,
            "scene_path": scene_path,
            "model_points_normals": model_points_normals,
            "best_seq_len": data_item_info["best_seq_len"],
            "first_frame_id": first_frame_idx,
        }
        return data_item

    def __len__(self):
        return len(self.datalist)

    def get_collate_fn(self):
        def collate_fn(batch):
            collate_data = {}
            collate_data["first_pts"] = [torch.tensor(item['first_pts']) for item in batch]
            collate_data["first_to_world_9d"] = [torch.tensor(item['first_to_world_9d']) for item in batch]
            collate_data["combined_scanned_pts"] = torch.stack([torch.tensor(item['combined_scanned_pts']) for item in batch])
            for key in batch[0].keys():
                if key not in ["first_pts", "first_to_world_9d", "combined_scanned_pts"]:
                    collate_data[key] = [item[key] for item in batch]
            return collate_data
        return collate_fn

# -------------- Debug ---------------- #
if __name__ == "__main__":
    import torch
    seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)
    config = {
        "root_dir": "/home/data/hofee/project/nbv_rec/data/nbv_rec_data_512_preproc_npy",
        "split_file": "/home/data/hofee/project/nbv_rec/data/OmniObject3d_train.txt",
        "model_dir": "/home/data/hofee/project/nbv_rec/data/scaled_object_meshes",
        "ratio": 0.005,
        "batch_size": 2,
        "filter_degree": 75,
        "num_workers": 0,
        "pts_num": 32684,
        "type": namespace.Mode.TEST,
        "load_from_preprocess": True
    }
    ds = SeqNBVReconstructionDataset(config)
    print(len(ds))
    #ds.__getitem__(10)
    dl = ds.get_loader(shuffle=True)
    for idx, data in enumerate(dl):
        data = ds.process_batch(data, "cuda:0")
        print(data)
        # ------ Debug Start ------
        import ipdb;ipdb.set_trace()
        # ------ Debug End ------
@@ -20,8 +20,8 @@ class NBVReconstructionPipeline(nn.Module):
         self.pose_encoder = ComponentFactory.create(
             namespace.Stereotype.MODULE, self.module_config["pose_encoder"]
         )
-        self.transformer_seq_encoder = ComponentFactory.create(
-            namespace.Stereotype.MODULE, self.module_config["transformer_seq_encoder"]
+        self.seq_encoder = ComponentFactory.create(
+            namespace.Stereotype.MODULE, self.module_config["seq_encoder"]
         )
         self.view_finder = ComponentFactory.create(
             namespace.Stereotype.MODULE, self.module_config["view_finder"]
@@ -29,7 +29,6 @@ class NBVReconstructionPipeline(nn.Module):


         self.eps = float(self.config["eps"])
-        self.enable_global_scanned_feat = self.config["global_scanned_feat"]

    def forward(self, data):
        mode = data["mode"]
@@ -55,10 +54,7 @@ class NBVReconstructionPipeline(nn.Module):
        return perturbed_x, random_t, target_score, std

    def forward_train(self, data):
-        start_time = time.time()
        main_feat = self.get_main_feat(data)
-        end_time = time.time()
-        print("get_main_feat time: ", end_time - start_time)
        """ get std """
        best_to_world_pose_9d_batch = data["best_to_world_pose_9d"]
        perturbed_x, random_t, target_score, std = self.pertube_data(
@@ -79,6 +75,8 @@ class NBVReconstructionPipeline(nn.Module):

    def forward_test(self, data):
        main_feat = self.get_main_feat(data)
+        repeat_num = data.get("repeat_num", 1)
+        main_feat = main_feat.repeat(repeat_num, 1)
        estimated_delta_rot_9d, in_process_sample = self.view_finder.next_best_view(
            main_feat
        )
@@ -108,7 +106,7 @@ class NBVReconstructionPipeline(nn.Module):
            seq_embedding = pose_feat_seq
            embedding_list_batch.append(seq_embedding)  # List(B): Tensor(S x (Dp))

-        seq_feat = self.transformer_seq_encoder.encode_sequence(embedding_list_batch)  # Tensor(B x Ds)
+        seq_feat = self.seq_encoder.encode_sequence(embedding_list_batch)  # Tensor(B x Ds)
        main_feat = torch.cat([seq_feat, global_scanned_feat], dim=-1)  # Tensor(B x (Ds+Dg))

        if torch.isnan(main_feat).any():
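The notable behavioral change in forward_test above is the new repeat_num path. A tiny torch-only sketch of what it does (the batch size and the feature dimension 2048 are illustrative):

import torch

# One encoded scene feature from get_main_feat: shape (B, Dm) with B = 1.
main_feat = torch.randn(1, 2048)
repeat_num = 50  # e.g. 50 pose samples requested for one scene
main_feat = main_feat.repeat(repeat_num, 1)  # (50, 2048)
# view_finder.next_best_view then draws 50 candidate 9D poses for the same
# scene, which beans/predict_result.py in this compare view can cluster.
print(main_feat.shape)  # torch.Size([50, 2048])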
@@ -2,153 +2,208 @@ import numpy as np
 from PytorchBoot.dataset import BaseDataset
 import PytorchBoot.namespace as namespace
 import PytorchBoot.stereotype as stereotype
+from PytorchBoot.config import ConfigManager
 from PytorchBoot.utils.log_util import Log
 import torch
 import os
 import sys
-sys.path.append(r"/home/data/hofee/project/nbv_rec/nbv_reconstruction")

+sys.path.append(r"/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction")

 from utils.data_load import DataLoadUtil
 from utils.pose import PoseUtil
 from utils.pts import PtsUtil

-@stereotype.dataset("seq_nbv_reconstruction_dataset")
-class SeqNBVReconstructionDataset(BaseDataset):
+@stereotype.dataset("seq_reconstruction_dataset")
+class SeqReconstructionDataset(BaseDataset):
     def __init__(self, config):
-        super(SeqNBVReconstructionDataset, self).__init__(config)
-        self.type = config["type"]
-        if self.type != namespace.Mode.TEST:
-            Log.error("Dataset <seq_nbv_reconstruction_dataset> Only support test mode", terminate=True)
+        super(SeqReconstructionDataset, self).__init__(config)
         self.config = config
         self.root_dir = config["root_dir"]
         self.split_file_path = config["split_file"]
         self.scene_name_list = self.load_scene_name_list()
         self.datalist = self.get_datalist()
-        self.pts_num = config["pts_num"]
-
-        self.model_dir = config["model_dir"]
-        self.filter_degree = config["filter_degree"]
+        self.pts_num = config["pts_num"]
+        self.type = config["type"]
+        self.cache = config.get("cache")
         self.load_from_preprocess = config.get("load_from_preprocess", False)

+        if self.type == namespace.Mode.TEST:
+            #self.model_dir = config["model_dir"]
+            self.filter_degree = config["filter_degree"]
+        if self.type == namespace.Mode.TRAIN:
+            scale_ratio = 1
+            self.datalist = self.datalist*scale_ratio
+        if self.cache:
+            expr_root = ConfigManager.get("runner", "experiment", "root_dir")
+            expr_name = ConfigManager.get("runner", "experiment", "name")
+            self.cache_dir = os.path.join(expr_root, expr_name, "cache")
+            # self.preprocess_cache()

     def load_scene_name_list(self):
         scene_name_list = []
         with open(self.split_file_path, "r") as f:
             for line in f:
                 scene_name = line.strip()
+                if not os.path.exists(os.path.join(self.root_dir, scene_name)):
+                    continue
                 scene_name_list.append(scene_name)
         return scene_name_list

+    def get_scene_name_list(self):
+        return self.scene_name_list
+
     def get_datalist(self):
         datalist = []
-        for scene_name in self.scene_name_list:
-            seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
-            scene_max_coverage_rate = 0
+        total = len(self.scene_name_list)
+        for idx, scene_name in enumerate(self.scene_name_list):
+            print(f"processing {scene_name} ({idx}/{total})")
             scene_max_cr_idx = 0
+            frame_len = DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)

-            for seq_idx in range(seq_num):
-                label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, seq_idx)
-                label_data = DataLoadUtil.load_label(label_path)
-                max_coverage_rate = label_data["max_coverage_rate"]
-                if max_coverage_rate > scene_max_coverage_rate:
-                    scene_max_coverage_rate = max_coverage_rate
-                    scene_max_cr_idx = seq_idx
-
-            label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, scene_max_cr_idx)
-            label_data = DataLoadUtil.load_label(label_path)
-            first_frame = label_data["best_sequence"][0]
-            best_seq_len = len(label_data["best_sequence"])
+            for i in range(10, frame_len):
+                path = DataLoadUtil.get_path(self.root_dir, scene_name, i)
+                pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
+                print(pts.shape)
+                if pts.shape[0] == 0:
+                    continue
+                else:
+                    break
+            print(i)
             datalist.append({
                 "scene_name": scene_name,
-                "first_frame": first_frame,
-                "max_coverage_rate": scene_max_coverage_rate,
-                "best_seq_len": best_seq_len,
+                "first_frame": i,
+                "best_seq_len": -1,
+                "max_coverage_rate": 1.0,
                 "label_idx": scene_max_cr_idx,
             })
         return datalist

+    def preprocess_cache(self):
+        Log.info("preprocessing cache...")
+        for item_idx in range(len(self.datalist)):
+            self.__getitem__(item_idx)
+        Log.success("finish preprocessing cache.")
+
+    def load_from_cache(self, scene_name, curr_frame_idx):
+        cache_name = f"{scene_name}_{curr_frame_idx}.txt"
+        cache_path = os.path.join(self.cache_dir, cache_name)
+        if os.path.exists(cache_path):
+            data = np.loadtxt(cache_path)
+            return data
+        else:
+            return None
+
+    def save_to_cache(self, scene_name, curr_frame_idx, data):
+        cache_name = f"{scene_name}_{curr_frame_idx}.txt"
+        cache_path = os.path.join(self.cache_dir, cache_name)
+        try:
+            np.savetxt(cache_path, data)
+        except Exception as e:
+            Log.error(f"Save cache failed: {e}")
+
+    def seq_combined_pts(self, scene, frame_idx_list):
+        all_combined_pts = []
+        for i in frame_idx_list:
+            path = DataLoadUtil.get_path(self.root_dir, scene, i)
+            pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
+            if pts.shape[0] == 0:
+                continue
+            all_combined_pts.append(pts)
+        all_combined_pts = np.vstack(all_combined_pts)
+        downsampled_all_pts = PtsUtil.voxel_downsample_point_cloud(all_combined_pts, 0.003)
+        return downsampled_all_pts
+
     def __getitem__(self, index):
         data_item_info = self.datalist[index]
-        first_frame_idx = data_item_info["first_frame"][0]
-        first_frame_coverage = data_item_info["first_frame"][1]
         max_coverage_rate = data_item_info["max_coverage_rate"]
+        best_seq_len = data_item_info["best_seq_len"]
         scene_name = data_item_info["scene_name"]
-        first_cam_info = DataLoadUtil.load_cam_info(DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx), binocular=True)
-        first_view_path = DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx)
-        first_left_cam_pose = first_cam_info["cam_to_world"]
-        first_center_cam_pose = first_cam_info["cam_to_world_O"]
-        first_target_point_cloud = DataLoadUtil.load_from_preprocessed_pts(first_view_path)
-        first_pts_num = first_target_point_cloud.shape[0]
-        first_downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(first_target_point_cloud, self.pts_num)
-        first_to_world_rot_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(first_left_cam_pose[:3,:3]))
-        first_to_world_trans = first_left_cam_pose[:3,3]
-        first_to_world_9d = np.concatenate([first_to_world_rot_6d, first_to_world_trans], axis=0)
-        diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
-        voxel_threshold = diag*0.02
-        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
-        scene_path = os.path.join(self.root_dir, scene_name)
-        model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
+        (
+            scanned_views_pts,
+            scanned_coverages_rate,
+            scanned_n_to_world_pose,
+        ) = ([], [], [])
+        view = data_item_info["first_frame"]
+        frame_idx = view
+        view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
+        cam_info = DataLoadUtil.load_cam_info(view_path, binocular=True)
+
+        n_to_world_pose = cam_info["cam_to_world"]
+        target_point_cloud = (
+            DataLoadUtil.load_from_preprocessed_pts(view_path)
+        )
+        downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(
+            target_point_cloud, self.pts_num
+        )
+        scanned_views_pts.append(downsampled_target_point_cloud)
+
+        n_to_world_6d = PoseUtil.matrix_to_rotation_6d_numpy(
+            np.asarray(n_to_world_pose[:3, :3])
+        )
+        first_left_cam_pose = cam_info["cam_to_world"]
+        first_center_cam_pose = cam_info["cam_to_world_O"]
+        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
+        n_to_world_trans = n_to_world_pose[:3, 3]
+        n_to_world_9d = np.concatenate([n_to_world_6d, n_to_world_trans], axis=0)
+        scanned_n_to_world_pose.append(n_to_world_9d)
+
+        frame_list = []
+        for i in range(DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)):
+            frame_list.append(i)
+        gt_pts = self.seq_combined_pts(scene_name, frame_list)
         data_item = {
-            "first_pts_num": np.asarray(
-                first_pts_num, dtype=np.int32
-            ),
-            "first_pts": np.asarray([first_downsampled_target_point_cloud],dtype=np.float32),
-            "combined_scanned_pts": np.asarray(first_downsampled_target_point_cloud,dtype=np.float32),
-            "first_to_world_9d": np.asarray([first_to_world_9d],dtype=np.float32),
-            "scene_name": scene_name,
-            "max_coverage_rate": max_coverage_rate,
-            "voxel_threshold": voxel_threshold,
-            "filter_degree": self.filter_degree,
+            "first_scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32),  # Ndarray(S x Nv x 3)
+            "first_scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32),  # Ndarray(S x 9)
+            "seq_max_coverage_rate": max_coverage_rate,  # Float, range(0, 1)
+            "best_seq_len": best_seq_len,  # Int
+            "scene_name": scene_name,  # String
+            "gt_pts": gt_pts,  # Ndarray(N x 3)
+            "scene_path": os.path.join(self.root_dir, scene_name),  # String
             "O_to_L_pose": first_O_to_first_L_pose,
-            "first_frame_coverage": first_frame_coverage,
-            "scene_path": scene_path,
-            "model_points_normals": model_points_normals,
-            "best_seq_len": data_item_info["best_seq_len"],
-            "first_frame_id": first_frame_idx,
         }
-
         return data_item

     def __len__(self):
         return len(self.datalist)

     def get_collate_fn(self):
         def collate_fn(batch):
             collate_data = {}
             collate_data["first_pts"] = [torch.tensor(item['first_pts']) for item in batch]
             collate_data["first_to_world_9d"] = [torch.tensor(item['first_to_world_9d']) for item in batch]
             collate_data["combined_scanned_pts"] = torch.stack([torch.tensor(item['combined_scanned_pts']) for item in batch])
             for key in batch[0].keys():
                 if key not in ["first_pts", "first_to_world_9d", "combined_scanned_pts"]:
                     collate_data[key] = [item[key] for item in batch]
             return collate_data
         return collate_fn

 # -------------- Debug ---------------- #
 if __name__ == "__main__":
     import torch
+    from tqdm import tqdm
+    import pickle
+    import os
+
     seed = 0
     torch.manual_seed(seed)
     np.random.seed(seed)
+
     config = {
-        "root_dir": "/home/data/hofee/project/nbv_rec/data/nbv_rec_data_512_preproc_npy",
-        "split_file": "/home/data/hofee/project/nbv_rec/data/OmniObject3d_train.txt",
-        "model_dir": "/home/data/hofee/project/nbv_rec/data/scaled_object_meshes",
-        "ratio": 0.005,
-        "batch_size": 2,
+        "root_dir": "/media/hofee/data/data/test_bottle/view",
+        "source": "seq_reconstruction_dataset",
+        "split_file": "/media/hofee/data/data/test_bottle/test_bottle.txt",
+        "load_from_preprocess": True,
         "filter_degree": 75,
         "num_workers": 0,
-        "pts_num": 32684,
+        "pts_num": 8192,
         "type": namespace.Mode.TEST,
-        "load_from_preprocess": True
     }
-    ds = SeqNBVReconstructionDataset(config)
-    print(len(ds))
-    #ds.__getitem__(10)
-    dl = ds.get_loader(shuffle=True)
-    for idx, data in enumerate(dl):
-        data = ds.process_batch(data, "cuda:0")
-        print(data)
-        # ------ Debug Start ------
-        import ipdb;ipdb.set_trace()
-        # ------ Debug End ------
+    output_dir = "/media/hofee/data/data/test_bottle/preprocessed_dataset"
+    os.makedirs(output_dir, exist_ok=True)
+
+    ds = SeqReconstructionDataset(config)
+    for i in tqdm(range(len(ds)), desc="processing dataset"):
+        output_path = os.path.join(output_dir, f"item_{i}.pkl")
+        item = ds.__getitem__(i)
+        for key, value in item.items():
+            if isinstance(value, np.ndarray):
+                item[key] = value.tolist()
+        #import ipdb; ipdb.set_trace()
+        with open(output_path, "wb") as f:
+            pickle.dump(item, f)
core/seq_dataset_preprocessed.py (new file, 82 lines)
@@ -0,0 +1,82 @@
import numpy as np
from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.config import ConfigManager
from PytorchBoot.utils.log_util import Log
import pickle
import torch
import os
import sys

sys.path.append(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction")

from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil

@stereotype.dataset("seq_reconstruction_dataset_preprocessed")
class SeqReconstructionDatasetPreprocessed(BaseDataset):
    def __init__(self, config):
        super(SeqReconstructionDatasetPreprocessed, self).__init__(config)
        self.config = config
        self.root_dir = config["root_dir"]
        self.real_root_dir = r"/media/hofee/data/data/new_testset"
        self.item_list = os.listdir(self.root_dir)

    def __getitem__(self, index):
        data = pickle.load(open(os.path.join(self.root_dir, self.item_list[index]), "rb"))
        data_item = {
            "first_scanned_pts": np.asarray(data["first_scanned_pts"], dtype=np.float32),  # Ndarray(S x Nv x 3)
            "first_scanned_n_to_world_pose_9d": np.asarray(data["first_scanned_n_to_world_pose_9d"], dtype=np.float32),  # Ndarray(S x 9)
            "seq_max_coverage_rate": data["seq_max_coverage_rate"],  # Float, range(0, 1)
            "best_seq_len": data["best_seq_len"],  # Int
            "scene_name": data["scene_name"],  # String
            "gt_pts": np.asarray(data["gt_pts"], dtype=np.float32),  # Ndarray(N x 3)
            "scene_path": os.path.join(self.real_root_dir, data["scene_name"]),  # String
            "O_to_L_pose": np.asarray(data["O_to_L_pose"], dtype=np.float32),
        }
        return data_item

    def __len__(self):
        return len(self.item_list)

# -------------- Debug ---------------- #
if __name__ == "__main__":
    import torch

    seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)
    '''
    OmniObject3d_test:
        root_dir: "H:\\AI\\Datasets\\packed_test_data"
        model_dir: "H:\\AI\\Datasets\\scaled_object_meshes"
        source: seq_reconstruction_dataset
        split_file: "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt"
        type: test
        filter_degree: 75
        eval_list:
        - pose_diff
        - coverage_rate_increase
        ratio: 0.1
        batch_size: 1
        num_workers: 12
        pts_num: 8192
        load_from_preprocess: True
    '''
    config = {
        "root_dir": "/media/hofee/data/data/test_bottle/preprocessed_dataset",
        "source": "seq_reconstruction_dataset",
        "split_file": "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt",
        "load_from_preprocess": True,
        "ratio": 1,
        "filter_degree": 75,
        "num_workers": 0,
        "pts_num": 8192,
        "type": "test",
    }
    ds = SeqReconstructionDatasetPreprocessed(config)
    print(len(ds))
    print(ds.__getitem__(10))
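Since each preprocessed item is just a pickled dict of lists, it can also be inspected without the dataset class. A small sketch (the path is the output_dir used by the preprocessing __main__ in core/seq_dataset.py above; the keys come from that same file):

import os
import pickle
import numpy as np

root = "/media/hofee/data/data/test_bottle/preprocessed_dataset"
with open(os.path.join(root, "item_0.pkl"), "rb") as f:
    item = pickle.load(f)

# Arrays were stored as lists by the preprocessing script; convert back the
# same way __getitem__ above does.
pts = np.asarray(item["first_scanned_pts"], dtype=np.float32)                    # (S, Nv, 3)
poses = np.asarray(item["first_scanned_n_to_world_pose_9d"], dtype=np.float32)  # (S, 9)
print(item["scene_name"], pts.shape, poses.shape)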
modules/module_lib/pointnet2_modules.py (new file, 162 lines)
@@ -0,0 +1,162 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from . import pointnet2_utils
from . import pytorch_utils as pt_utils
from typing import List


class _PointnetSAModuleBase(nn.Module):

    def __init__(self):
        super().__init__()
        self.npoint = None
        self.groupers = None
        self.mlps = None
        self.pool_method = 'max_pool'

    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the features
        :param new_xyz:
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            ).transpose(1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)

            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)

            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)


class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping"""

    def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True,
                 use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
        """
        :param npoint: int
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
        :param bn: whether to use batchnorm
        :param use_xyz:
        :param pool_method: max_pool / avg_pool
        :param instance_norm: whether to use instance_norm
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
            )
            mlp_spec = mlps[i]
            if use_xyz:
                mlp_spec[0] += 3

            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm))
        self.pool_method = pool_method


class PointnetSAModule(PointnetSAModuleMSG):
    """Pointnet set abstraction layer"""

    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None,
                 bn: bool = True, use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
        """
        :param mlp: list of int, spec of the pointnet before the global max_pool
        :param npoint: int, number of features
        :param radius: float, radius of ball
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz:
        :param pool_method: max_pool / avg_pool
        :param instance_norm: whether to use instance_norm
        """
        super().__init__(
            mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz,
            pool_method=pool_method, instance_norm=instance_norm
        )


class PointnetFPModule(nn.Module):
    r"""Propagates the features of one set to another"""

    def __init__(self, *, mlp: List[int], bn: bool = True):
        """
        :param mlp: list of int
        :param bn: whether to use batchnorm
        """
        super().__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)

    def forward(
            self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor
    ) -> torch.Tensor:
        """
        :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
        :param known: (B, m, 3) tensor of the xyz positions of the known features
        :param unknow_feats: (B, C1, n) tensor of the features to be propagated to
        :param known_feats: (B, C2, m) tensor of features to be propagated
        :return:
            new_features: (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm

            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))

        if unknow_feats is not None:
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats

        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)

        return new_features.squeeze(-1)


if __name__ == "__main__":
    pass
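A minimal forward-pass sketch for the set-abstraction module above. It assumes the compiled pointnet2_cuda extension is importable and a CUDA device is present, since the underlying ops (furthest point sampling, ball query, grouping) are CUDA-only in this implementation:

import torch
from modules.module_lib.pointnet2_modules import PointnetSAModule

# Abstract 4096 points down to 512 centroids; each centroid pools a 64-point
# ball neighborhood of radius 0.1 through a shared MLP. With use_xyz=True the
# first MLP width is bumped by 3 internally, so [0, 32, 64] becomes [3, 32, 64].
sa = PointnetSAModule(mlp=[0, 32, 64], npoint=512, radius=0.1, nsample=64).cuda()

xyz = torch.rand(2, 4096, 3).cuda()             # (B, N, 3) point coordinates
new_xyz, new_features = sa(xyz, features=None)  # xyz-only input
print(new_xyz.shape)       # torch.Size([2, 512, 3])
print(new_features.shape)  # torch.Size([2, 64, 512])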
modules/module_lib/pointnet2_utils.py (new file, 291 lines; listing truncated below)
@@ -0,0 +1,291 @@
|
||||
import torch
|
||||
from torch.autograd import Variable
|
||||
from torch.autograd import Function
|
||||
import torch.nn as nn
|
||||
from typing import Tuple
|
||||
import sys
|
||||
|
||||
import pointnet2_cuda as pointnet2
|
||||
|
||||
|
||||
class FurthestPointSampling(Function):
|
||||
@staticmethod
|
||||
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
|
||||
"""
|
||||
Uses iterative furthest point sampling to select a set of npoint features that have the largest
|
||||
minimum distance
|
||||
:param ctx:
|
||||
:param xyz: (B, N, 3) where N > npoint
|
||||
:param npoint: int, number of features in the sampled set
|
||||
:return:
|
||||
output: (B, npoint) tensor containing the set
|
||||
"""
|
||||
assert xyz.is_contiguous()
|
||||
|
||||
B, N, _ = xyz.size()
|
||||
output = torch.cuda.IntTensor(B, npoint)
|
||||
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
|
||||
|
||||
pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
|
||||
return output
|
||||
|
||||
@staticmethod
|
||||
def backward(xyz, a=None):
|
||||
return None, None
|
||||
|
||||
|
||||
furthest_point_sample = FurthestPointSampling.apply
|
||||
|
||||
|
||||
class GatherOperation(Function):
|
||||
|
||||
@staticmethod
|
||||
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
:param ctx:
|
||||
:param features: (B, C, N)
|
||||
:param idx: (B, npoint) index tensor of the features to gather
|
||||
:return:
|
||||
output: (B, C, npoint)
|
||||
"""
|
||||
assert features.is_contiguous()
|
||||
assert idx.is_contiguous()
|
||||
|
||||
B, npoint = idx.size()
|
||||
_, C, N = features.size()
|
||||
output = torch.cuda.FloatTensor(B, C, npoint)
|
||||
|
||||
pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
|
||||
|
||||
ctx.for_backwards = (idx, C, N)
|
||||
return output
|
||||
|
||||
@staticmethod
|
||||
def backward(ctx, grad_out):
|
||||
idx, C, N = ctx.for_backwards
|
||||
B, npoint = idx.size()
|
||||
|
||||
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
|
||||
grad_out_data = grad_out.data.contiguous()
|
||||
pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
|
||||
return grad_features, None
|
||||
|
||||
|
||||
gather_operation = GatherOperation.apply
|
||||
|
||||
|
||||
class ThreeNN(Function):
|
||||
|
||||
@staticmethod
|
||||
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Find the three nearest neighbors of unknown in known
|
||||
:param ctx:
|
||||
:param unknown: (B, N, 3)
|
||||
:param known: (B, M, 3)
|
||||
:return:
|
||||
dist: (B, N, 3) l2 distance to the three nearest neighbors
|
||||
idx: (B, N, 3) index of 3 nearest neighbors
|
||||
"""
|
||||
assert unknown.is_contiguous()
|
||||
assert known.is_contiguous()
|
||||
|
||||
B, N, _ = unknown.size()
|
||||
m = known.size(1)
|
||||
dist2 = torch.cuda.FloatTensor(B, N, 3)
|
||||
idx = torch.cuda.IntTensor(B, N, 3)
|
||||
|
||||
pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
|
||||
return torch.sqrt(dist2), idx
|
||||
|
||||
@staticmethod
|
||||
def backward(ctx, a=None, b=None):
|
||||
return None, None
|
||||
|
||||
|
||||
three_nn = ThreeNN.apply


class ThreeInterpolate(Function):

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weighted linear interpolation over the three nearest-neighbor features
        :param ctx:
        :param features: (B, C, M) feature descriptors to be interpolated from
        :param idx: (B, n, 3) indices of the three nearest neighbors of the target features in features
        :param weight: (B, n, 3) interpolation weights
        :return:
            output: (B, C, n) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()

        B, c, m = features.size()
        n = idx.size(1)
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)

        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, N) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None, None (idx and weight receive no gradient)
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        B, c, n = grad_out.size()

        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()

        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return grad_features, None, None


three_interpolate = ThreeInterpolate.apply
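
# Usage sketch: propagate features from a sparse point set back to a dense one
# via inverse-distance weighting (the standard PointNet++ feature-propagation
# step), assuming the compiled CUDA extension.
#
#   dist, idx = three_nn(dense_xyz, sparse_xyz)                  # (B, N, 3) each
#   dist_recip = 1.0 / (dist + 1e-8)
#   weight = dist_recip / dist_recip.sum(dim=2, keepdim=True)
#   dense_feats = three_interpolate(sparse_feats, idx, weight)   # (B, C, N)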


class GroupingOperation(Function):

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indices of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()

        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)

        pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)

        ctx.for_backwards = (idx, N)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features
        """
        idx, N = ctx.for_backwards

        B, C, npoint, nsample = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())

        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
        return grad_features, None


grouping_operation = GroupingOperation.apply


class BallQuery(Function):

    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param radius: float, radius of the balls
        :param nsample: int, maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return:
            idx: (B, npoint, nsample) tensor with the indices of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()

        B, N, _ = xyz.size()
        npoint = new_xyz.size(1)
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()

        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        return None, None, None, None


ball_query = BallQuery.apply
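
# Usage sketch: group local neighborhoods around FPS centroids, assuming the
# compiled CUDA extension.
#
#   centroids = gather_operation(xyz.transpose(1, 2).contiguous(),
#                                furthest_point_sample(xyz, 256)).transpose(1, 2).contiguous()
#   idx = ball_query(0.1, 32, xyz, centroids)          # (B, 256, 32)
#   grouped = grouping_operation(feats, idx)           # (B, C, 256, 32)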


class QueryAndGroup(nn.Module):
    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: bool, whether to concatenate the relative xyz coordinates to the grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> torch.Tensor:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, npoint, nsample)
        """
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        xyz_trans = xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)  # translate to centroid-relative coordinates

        if features is not None:
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, C + 3, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "use_xyz must be True when no features are given, otherwise there is nothing to group"
            new_features = grouped_xyz

        return new_features
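
# Usage sketch: QueryAndGroup bundles ball_query + grouping_operation; the
# shapes below assume use_xyz=True and a CUDA device.
#
#   grouper = QueryAndGroup(radius=0.1, nsample=32, use_xyz=True)
#   new_features = grouper(xyz, centroids, feats)      # (B, C + 3, npoint, 32)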


class GroupAll(nn.Module):
    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, C + 3, 1, N)
        """
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
        if features is not None:
            grouped_features = features.unsqueeze(2)
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (B, 3 + C, 1, N)
            else:
                new_features = grouped_features
        else:
            new_features = grouped_xyz

        return new_features
236
modules/module_lib/pytorch_utils.py
Normal file
@@ -0,0 +1,236 @@
import torch.nn as nn
from typing import List, Tuple


class SharedMLP(nn.Sequential):

    def __init__(
            self,
            args: List[int],
            *,
            bn: bool = False,
            activation=nn.ReLU(inplace=True),
            preact: bool = False,
            first: bool = False,
            name: str = "",
            instance_norm: bool = False,
    ):
        super().__init__()

        for i in range(len(args) - 1):
            self.add_module(
                name + 'layer{}'.format(i),
                Conv2d(
                    args[i],
                    args[i + 1],
                    bn=(not first or not preact or (i != 0)) and bn,
                    activation=activation
                    if (not first or not preact or (i != 0)) else None,
                    preact=preact,
                    instance_norm=instance_norm
                )
            )


class _ConvBase(nn.Sequential):

    def __init__(
            self,
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=None,
            batch_norm=None,
            bias=True,
            preact=False,
            name="",
            instance_norm=False,
            instance_norm_func=None
    ):
        super().__init__()

        bias = bias and (not bn)  # a conv bias is redundant when batch norm follows
        conv_unit = conv(
            in_size,
            out_size,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=bias
        )
        init(conv_unit.weight)
        if bias:
            nn.init.constant_(conv_unit.bias, 0)

        if bn:
            if not preact:
                bn_unit = batch_norm(out_size)
            else:
                bn_unit = batch_norm(in_size)
        if instance_norm:
            if not preact:
                in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)
            else:
                in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)

        if preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)

            if activation is not None:
                self.add_module(name + 'activation', activation)

            if not bn and instance_norm:
                self.add_module(name + 'in', in_unit)

        self.add_module(name + 'conv', conv_unit)

        if not preact:
            if bn:
                self.add_module(name + 'bn', bn_unit)

            if activation is not None:
                self.add_module(name + 'activation', activation)

            if not bn and instance_norm:
                self.add_module(name + 'in', in_unit)


class _BNBase(nn.Sequential):

    def __init__(self, in_size, batch_norm=None, name=""):
        super().__init__()
        self.add_module(name + "bn", batch_norm(in_size))

        nn.init.constant_(self[0].weight, 1.0)
        nn.init.constant_(self[0].bias, 0)


class BatchNorm1d(_BNBase):

    def __init__(self, in_size: int, *, name: str = ""):
        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)


class BatchNorm2d(_BNBase):

    def __init__(self, in_size: int, name: str = ""):
        super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)


class Conv1d(_ConvBase):

    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: int = 1,
            stride: int = 1,
            padding: int = 0,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv1d,
            batch_norm=BatchNorm1d,
            bias=bias,
            preact=preact,
            name=name,
            instance_norm=instance_norm,
            instance_norm_func=nn.InstanceNorm1d
        )


class Conv2d(_ConvBase):

    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            kernel_size: Tuple[int, int] = (1, 1),
            stride: Tuple[int, int] = (1, 1),
            padding: Tuple[int, int] = (0, 0),
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=nn.init.kaiming_normal_,
            bias: bool = True,
            preact: bool = False,
            name: str = "",
            instance_norm=False
    ):
        super().__init__(
            in_size,
            out_size,
            kernel_size,
            stride,
            padding,
            activation,
            bn,
            init,
            conv=nn.Conv2d,
            batch_norm=BatchNorm2d,
            bias=bias,
            preact=preact,
            name=name,
            instance_norm=instance_norm,
            instance_norm_func=nn.InstanceNorm2d
        )


class FC(nn.Sequential):

    def __init__(
            self,
            in_size: int,
            out_size: int,
            *,
            activation=nn.ReLU(inplace=True),
            bn: bool = False,
            init=None,
            preact: bool = False,
            name: str = ""
    ):
        super().__init__()

        fc = nn.Linear(in_size, out_size, bias=not bn)
        if init is not None:
            init(fc.weight)
        if not bn:
            nn.init.constant_(fc.bias, 0)

        if preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(in_size))

            if activation is not None:
                self.add_module(name + 'activation', activation)

        self.add_module(name + 'fc', fc)

        if not preact:
            if bn:
                self.add_module(name + 'bn', BatchNorm1d(out_size))

            if activation is not None:
                self.add_module(name + 'activation', activation)
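
# Usage sketch: SharedMLP applies the same 1x1 Conv2d stack to every point in a
# (B, C, npoint, nsample) tensor; FC is a Linear + optional BN/activation block.
# Shapes below are illustrative.
#
#   mlp = SharedMLP([3, 64, 128], bn=True)            # two 1x1 conv layers
#   out = mlp(grouped)                                # (B, 128, npoint, nsample)
#   head = FC(128, 40, activation=None, bn=False)     # plain linear head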
148
modules/pointnet++_encoder.py
Normal file
@@ -0,0 +1,148 @@
import torch
import torch.nn as nn
import os
import sys
path = os.path.abspath(__file__)
for i in range(2):
    path = os.path.dirname(path)
PROJECT_ROOT = path
sys.path.append(PROJECT_ROOT)
import PytorchBoot.stereotype as stereotype
from modules.module_lib.pointnet2_modules import PointnetSAModuleMSG


ClsMSG_CFG_Dense = {
    'NPOINTS': [512, 256, 128, None],
    'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
    'NSAMPLE': [[32, 64], [16, 32], [8, 16], [None, None]],
    'MLPS': [[[16, 16, 32], [32, 32, 64]],
             [[64, 64, 128], [64, 96, 128]],
             [[128, 196, 256], [128, 196, 256]],
             [[256, 256, 512], [256, 384, 512]]],
    'DP_RATIO': 0.5,
}

ClsMSG_CFG_Light = {
    'NPOINTS': [512, 256, 128, None],
    'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
    'NSAMPLE': [[16, 32], [16, 32], [16, 32], [None, None]],
    'MLPS': [[[16, 16, 32], [32, 32, 64]],
             [[64, 64, 128], [64, 96, 128]],
             [[128, 196, 256], [128, 196, 256]],
             [[256, 256, 512], [256, 384, 512]]],
    'DP_RATIO': 0.5,
}

ClsMSG_CFG_Light_2048 = {
    'NPOINTS': [512, 256, 128, None],
    'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
    'NSAMPLE': [[16, 32], [16, 32], [16, 32], [None, None]],
    'MLPS': [[[16, 16, 32], [32, 32, 64]],
             [[64, 64, 128], [64, 96, 128]],
             [[128, 196, 256], [128, 196, 256]],
             [[256, 256, 1024], [256, 512, 1024]]],
    'DP_RATIO': 0.5,
}

ClsMSG_CFG_Strong = {
    'NPOINTS': [1024, 512, 256, 128, None],  # more sampled points to capture finer detail
    'RADIUS': [[0.02, 0.05], [0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [None, None]],  # larger receptive fields
    'NSAMPLE': [[32, 64], [32, 64], [32, 64], [32, 64], [None, None]],  # more samples per level
    'MLPS': [[[32, 32, 64], [64, 64, 128]],  # wider MLPs for stronger feature extraction
             [[128, 128, 256], [128, 128, 256]],
             [[256, 256, 512], [256, 384, 512]],
             [[512, 512, 1024], [512, 768, 1024]],
             [[1024, 1024, 2048], [1024, 1024, 2048]]],  # an extra, deeper feature level
    'DP_RATIO': 0.4,  # slightly lower dropout to retain more information
}

ClsMSG_CFG_Lighter = {
    'NPOINTS': [512, 256, 128, 64, None],
    'RADIUS': [[0.01], [0.02], [0.04], [0.08], [None]],
    'NSAMPLE': [[64], [32], [16], [8], [None]],
    'MLPS': [[[32, 32, 64]],
             [[64, 64, 128]],
             [[128, 196, 256]],
             [[256, 256, 512]],
             [[512, 512, 1024]]],
    'DP_RATIO': 0.5,
}


def select_params(name):
    if name == 'light':
        return ClsMSG_CFG_Light
    elif name == 'lighter':
        return ClsMSG_CFG_Lighter
    elif name == 'dense':
        return ClsMSG_CFG_Dense
    elif name == 'light_2048':
        return ClsMSG_CFG_Light_2048
    elif name == 'strong':
        return ClsMSG_CFG_Strong
    else:
        raise NotImplementedError


def break_up_pc(pc):
    xyz = pc[..., 0:3].contiguous()
    features = (
        pc[..., 3:].transpose(1, 2).contiguous()
        if pc.size(-1) > 3 else None
    )

    return xyz, features
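
# Usage sketch: break_up_pc splits a (B, N, 3 + C) cloud into coordinates and
# channel-first features; with a bare xyz cloud the feature tensor is None.
#
#   pc = torch.rand(2, 1024, 6)
#   xyz, feats = break_up_pc(pc)    # xyz: (2, 1024, 3), feats: (2, 3, 1024)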


@stereotype.module("pointnet++_encoder")
class PointNet2Encoder(nn.Module):
    def encode_points(self, pts, require_per_point_feat=False):
        return self.forward(pts)

    def __init__(self, config: dict):
        super().__init__()

        channel_in = config.get("in_dim", 3) - 3  # feature channels beyond xyz
        params_name = config.get("params_name", "light")

        self.SA_modules = nn.ModuleList()
        selected_params = select_params(params_name)
        for k in range(len(selected_params['NPOINTS'])):
            mlps = selected_params['MLPS'][k].copy()
            channel_out = 0
            for idx in range(len(mlps)):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]

            self.SA_modules.append(
                PointnetSAModuleMSG(
                    npoint=selected_params['NPOINTS'][k],
                    radii=selected_params['RADIUS'][k],
                    nsamples=selected_params['NSAMPLE'][k],
                    mlps=mlps,
                    use_xyz=True,
                    bn=True
                )
            )
            channel_in = channel_out

    def forward(self, point_cloud: torch.cuda.FloatTensor):
        xyz, features = break_up_pc(point_cloud)

        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        return l_features[-1].squeeze(-1)


if __name__ == '__main__':
    seed = 100
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    net = PointNet2Encoder(config={"in_dim": 3, "params_name": "strong"}).cuda()
    pts = torch.randn(2, 2444, 3).cuda()
    print(torch.mean(pts, dim=1))
    pre = net.encode_points(pts)
    print(pre.shape)

@@ -164,10 +164,10 @@ def save_scene_data(root, scene, scene_idx=0, scene_total=1,file_type="txt"):

if __name__ == "__main__":
    #root = "/media/hofee/repository/new_data_with_normal"
    root = r"H:\AI\Datasets\nbv_rec_part2"
    root = r"/media/hofee/data/data/test_bottle/view"
    scene_list = os.listdir(root)
    from_idx = 0 # 1000
    to_idx = 600 # 1500
    to_idx = len(scene_list) # 1500


    cnt = 0
@@ -12,8 +12,9 @@ from PytorchBoot.runners.runner import Runner
from PytorchBoot.utils import Log

from utils.pts import PtsUtil
from beans.predict_result import PredictResult

@stereotype.runner("inferencer")
@stereotype.runner("inferencer_server")
class InferencerServer(Runner):
    def __init__(self, config_path):
        super().__init__(config_path)
@@ -24,40 +25,45 @@ class InferencerServer(Runner):
        self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
        self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
        self.pipeline = self.pipeline.to(self.device)
        self.pts_num = 8192
        self.voxel_size = 0.002

        ''' Experiment '''
        self.load_experiment("nbv_evaluator")
        self.load_experiment("inferencer_server")

    def get_input_data(self, data):
        input_data = {}
        scanned_pts = data["scanned_pts"]
        scanned_n_to_world_pose_9d = data["scanned_n_to_world_pose_9d"]
        combined_scanned_views_pts = np.concatenate(scanned_pts, axis=0)
        fps_downsampled_combined_scanned_pts, fps_idx = PtsUtil.fps_downsample_point_cloud(
            combined_scanned_views_pts, self.pts_num, require_idx=True
        voxel_downsampled_combined_scanned_pts = PtsUtil.voxel_downsample_point_cloud(
            combined_scanned_views_pts, self.voxel_size
        )
        fps_downsampled_combined_scanned_pts, fps_idx = PtsUtil.fps_downsample_point_cloud(
            voxel_downsampled_combined_scanned_pts, self.pts_num, require_idx=True
        )
        combined_scanned_views_pts_mask = np.zeros(len(scanned_pts), dtype=np.uint8)
        start_idx = 0
        for i in range(len(scanned_pts)):
            end_idx = start_idx + len(scanned_pts[i])
            combined_scanned_views_pts_mask[start_idx:end_idx] = i
            start_idx = end_idx

        fps_downsampled_combined_scanned_pts_mask = combined_scanned_views_pts_mask[fps_idx]

        input_data["scanned_pts_mask"] = np.asarray(fps_downsampled_combined_scanned_pts_mask, dtype=np.uint8)
        input_data["scanned_pts"] = scanned_pts
        input_data["scanned_n_to_world_pose_9d"] = np.asarray(scanned_n_to_world_pose_9d, dtype=np.float32)
        input_data["combined_scanned_pts"] = np.asarray(fps_downsampled_combined_scanned_pts, dtype=np.float32)
        return input_data

    def get_result(self, output_data):

        estimated_delta_rot_9d = output_data["pred_pose_9d"]
        pred_pose_9d = output_data["pred_pose_9d"]
        pred_pose_9d = np.asarray(PredictResult(pred_pose_9d.cpu().numpy(), None, cluster_params=dict(eps=0.25, min_samples=3)).candidate_9d_poses, dtype=np.float32)
        result = {
            "estimated_delta_rot_9d": estimated_delta_rot_9d.tolist()
            "pred_pose_9d": pred_pose_9d.tolist()
        }
        return result

    def collate_input(self, input_data):
        collated_input_data = {}
        collated_input_data["scanned_pts"] = [torch.tensor(input_data["scanned_pts"], dtype=torch.float32, device=self.device)]
        collated_input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(input_data["scanned_n_to_world_pose_9d"], dtype=torch.float32, device=self.device)]
        collated_input_data["combined_scanned_pts"] = torch.tensor(input_data["combined_scanned_pts"], dtype=torch.float32, device=self.device).unsqueeze(0)
        return collated_input_data

    def run(self):
        Log.info("Loading from epoch {}.".format(self.current_epoch))

@@ -65,7 +71,8 @@ class InferencerServer(Runner):
        def inference():
            data = request.json
            input_data = self.get_input_data(data)
            output_data = self.pipeline.forward_test(input_data)
            collated_input_data = self.collate_input(input_data)
            output_data = self.pipeline.forward_test(collated_input_data)
            result = self.get_result(output_data)
            return jsonify(result)
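
# Client sketch: the Flask route decorator is outside this hunk, so the URL
# below is a hypothetical placeholder; the payload keys follow get_input_data
# above, and the response key follows get_result.
#
#   import requests
#   payload = {"scanned_pts": [...], "scanned_n_to_world_pose_9d": [...]}
#   resp = requests.post("http://localhost:5000/inference", json=payload)
#   pred_pose_9d = resp.json()["pred_pose_9d"]   # clustered candidate poses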

@@ -4,6 +4,7 @@ from utils.render import RenderUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil
from utils.reconstruction import ReconstructionUtil
from beans.predict_result import PredictResult

import torch
from tqdm import tqdm
@@ -19,14 +20,19 @@ from PytorchBoot.dataset import BaseDataset
from PytorchBoot.runners.runner import Runner
from PytorchBoot.utils import Log
from PytorchBoot.status import status_manager

from utils.data_load import DataLoadUtil
@stereotype.runner("inferencer")
class Inferencer(Runner):
    def __init__(self, config_path):

        super().__init__(config_path)

        self.script_path = ConfigManager.get(namespace.Stereotype.RUNNER, "blender_script_path")
        self.output_dir = ConfigManager.get(namespace.Stereotype.RUNNER, "output_dir")
        self.voxel_size = ConfigManager.get(namespace.Stereotype.RUNNER, "voxel_size")
        self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
        CM = 0.01
        self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) ** 2
        ''' Pipeline '''
        self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
        self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
@@ -34,6 +40,11 @@ class Inferencer(Runner):

        ''' Experiment '''
        self.load_experiment("nbv_evaluator")
        self.stat_result_path = os.path.join(self.output_dir, "stat.json")
        if os.path.exists(self.stat_result_path):
            with open(self.stat_result_path, "r") as f:
                self.stat_result = json.load(f)
        else:
            self.stat_result = {}

        ''' Test '''
@@ -65,128 +76,182 @@ class Inferencer(Runner):
        for dataset_idx, test_set in enumerate(self.test_set_list):
            status_manager.set_progress("inference", "inferencer", f"dataset", dataset_idx, len(self.test_set_list))
            test_set_name = test_set.get_name()
            test_loader = test_set.get_loader()

            if test_loader.batch_size > 1:
                Log.error("Batch size should be 1 for inference, found {} in {}".format(test_loader.batch_size, test_set_name), terminate=True)
            total = int(len(test_set))
            for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
                try:
                    data = test_set.__getitem__(i)
                    scene_name = data["scene_name"]
                    inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")

                    if os.path.exists(inference_result_path):
                        Log.info(f"Inference result already exists for scene: {scene_name}")
                        continue

            total = int(len(test_loader))
            loop = tqdm(enumerate(test_loader), total=total)
            for i, data in loop:
                status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
                test_set.process_batch(data, self.device)
                    output = self.predict_sequence(data)
                self.save_inference_result(test_set_name, data["scene_name"][0], output)
                    self.save_inference_result(test_set_name, data["scene_name"], output)
                except Exception as e:
                    Log.error(f"Error in scene {scene_name}, {e}")
                    continue

        status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))

    def predict_sequence(self, data, cr_increase_threshold=0, max_iter=50, max_retry=5):
        scene_name = data["scene_name"][0]
    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry=10, max_success=3):
        scene_name = data["scene_name"]
        Log.info(f"Processing scene: {scene_name}")
        status_manager.set_status("inference", "inferencer", "scene", scene_name)

        ''' data for rendering '''
        scene_path = data["scene_path"][0]
        O_to_L_pose = data["O_to_L_pose"][0]
        voxel_threshold = data["voxel_threshold"][0]
        filter_degree = data["filter_degree"][0]
        model_points_normals = data["model_points_normals"][0]
        model_pts = model_points_normals[:,:3]
        down_sampled_model_pts = PtsUtil.voxel_downsample_point_cloud(model_pts, voxel_threshold)
        first_frame_to_world_9d = data["first_to_world_9d"][0]
        first_frame_to_world = torch.eye(4, device=first_frame_to_world_9d.device)
        first_frame_to_world[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(first_frame_to_world_9d[:,:6])[0]
        first_frame_to_world[:3,3] = first_frame_to_world_9d[0,6:]
        first_frame_to_world = first_frame_to_world.to(self.device)
        scene_path = data["scene_path"]
        O_to_L_pose = data["O_to_L_pose"]
        voxel_threshold = self.voxel_size
        filter_degree = 75
        down_sampled_model_pts = data["gt_pts"]

        first_frame_to_world_9d = data["first_scanned_n_to_world_pose_9d"][0]
        first_frame_to_world = np.eye(4)
        first_frame_to_world[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(first_frame_to_world_9d[:6])
        first_frame_to_world[:3,3] = first_frame_to_world_9d[6:]

        ''' data for inference '''
        input_data = {}
        input_data["scanned_pts"] = [data["first_pts"][0].to(self.device)]
        input_data["scanned_n_to_world_pose_9d"] = [data["first_to_world_9d"][0].to(self.device)]
        input_data["combined_scanned_pts"] = torch.tensor(data["first_scanned_pts"][0], dtype=torch.float32).to(self.device).unsqueeze(0)
        input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)]
        input_data["mode"] = namespace.Mode.TEST
        input_data["combined_scanned_pts"] = data["combined_scanned_pts"]
        input_pts_N = input_data["scanned_pts"][0].shape[1]
        input_pts_N = input_data["combined_scanned_pts"].shape[1]

        first_frame_target_pts, _ = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, model_points_normals, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
        root = os.path.dirname(scene_path)
        display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
        radius = display_table_info["radius"]
        scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0, display_table_radius=radius))

        first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
        scanned_view_pts = [first_frame_target_pts]
        last_pred_cr = self.compute_coverage_rate(scanned_view_pts, None, down_sampled_model_pts, threshold=voxel_threshold)

        history_indices = [first_frame_scan_points_indices]
        last_pred_cr, added_pts_num = self.compute_coverage_rate(scanned_view_pts, None, down_sampled_model_pts, threshold=voxel_threshold)
        retry_duplication_pose = []
        retry_no_pts_pose = []
        retry_overlap_pose = []
        retry = 0
        pred_cr_seq = [last_pred_cr]
        while len(pred_cr_seq) < max_iter and retry < max_retry:

        success = 0
        last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], voxel_threshold).shape[0]
        import time
        while len(pred_cr_seq) < max_iter and retry < max_retry and success < max_success:
            Log.green(f"iter: {len(pred_cr_seq)}, retry: {retry}/{max_retry}, success: {success}/{max_success}")
            combined_scanned_pts = np.vstack(scanned_view_pts)
            voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
            output = self.pipeline(input_data)
            pred_pose_9d = output["pred_pose_9d"]
            pred_pose = torch.eye(4, device=pred_pose_9d.device)

            # # save pred_pose_9d ------
            # root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result"
            # scene_dir = os.path.join(root, scene_name)
            # if not os.path.exists(scene_dir):
            #     os.makedirs(scene_dir)
            # pred_9d_path = os.path.join(scene_dir, f"pred_pose_9d_{len(pred_cr_seq)}.npy")
            # pts_path = os.path.join(scene_dir, f"combined_scanned_pts_{len(pred_cr_seq)}.txt")
            # np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy()
            # np.save(pred_9d_path, pred_pose_9d.cpu().numpy())
            # np.savetxt(pts_path, np_combined_scanned_pts)
            # # ----- ----- -----
            predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
            # -----------------------
            # import ipdb; ipdb.set_trace()
            # predict_result.visualize()
            # -----------------------
            pred_pose_9d_candidates = predict_result.candidate_9d_poses
            for pred_pose_9d in pred_pose_9d_candidates:
                #import ipdb; ipdb.set_trace()
                pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0)
                pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9d[:,:6])[0]
                pred_pose[:3,3] = pred_pose_9d[0,6:]

                try:
                    new_target_pts_world, new_pts_world = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, model_points_normals, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose, require_full_scene=True)
                    new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
                    #import ipdb; ipdb.set_trace()
                    if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
                        curr_overlap_area_threshold = overlap_area_threshold
                    else:
                        curr_overlap_area_threshold = overlap_area_threshold * 0.5

                    downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
                    overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold=curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num=True)
                    if not overlap:
                        Log.yellow("no overlap!")
                        retry += 1
                        retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
                        continue

                    history_indices.append(new_scan_points_indices)
                except Exception as e:
                    Log.warning(f"Error in scene {scene_path}, {e}")
                    Log.error(f"Error in scene {scene_path}, {e}")
                    print("current pose: ", pred_pose)
                    print("curr_pred_cr: ", last_pred_cr)
                    retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
                    retry += 1
                    continue

                pred_cr = self.compute_coverage_rate(scanned_view_pts, new_target_pts_world, down_sampled_model_pts, threshold=voxel_threshold)

                print(pred_cr, last_pred_cr, " max: ", data["max_coverage_rate"])
                if pred_cr >= data["max_coverage_rate"]:
                    print("max coverage rate reached!")
                if pred_cr <= last_pred_cr + cr_increase_threshold:
                if new_target_pts.shape[0] == 0:
                    Log.red("no pts in new target")
                    retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
                    retry += 1
                    retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
                    continue

                retry = 0
                pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
                Log.yellow(f"{pred_cr}, {last_pred_cr}, max: , {data['seq_max_coverage_rate']}")
                if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
                    print("max coverage rate reached!: ", pred_cr)

                pred_cr_seq.append(pred_cr)
                scanned_view_pts.append(new_target_pts_world)
                down_sampled_new_pts_world = PtsUtil.random_downsample_point_cloud(new_pts_world, input_pts_N)
                new_pts_world_aug = np.hstack([down_sampled_new_pts_world, np.ones((down_sampled_new_pts_world.shape[0], 1))])
                new_pts = np.dot(np.linalg.inv(first_frame_to_world.cpu()), new_pts_world_aug.T).T[:,:3]
                scanned_view_pts.append(new_target_pts)

                new_pts_tensor = torch.tensor(new_pts, dtype=torch.float32).unsqueeze(0).to(self.device)

                input_data["scanned_pts"] = [torch.cat([input_data["scanned_pts"][0], new_pts_tensor], dim=0)]
                input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)]
                combined_scanned_views_pts = np.concatenate(input_data["scanned_pts"][0].tolist(), axis=0)
                voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_views_pts, 0.002)

                combined_scanned_pts = np.vstack(scanned_view_pts)
                voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, voxel_threshold)
                random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
                input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)

                last_pred_cr = pred_cr
                pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
                Log.info(f"delta pts num:,{pts_num - last_pts_num },{pts_num}, {last_pts_num}")

                if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
                    retry += 1
                    retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
                    Log.red(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
                elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2:
                    success += 1
                    Log.success(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")

                last_pts_num = pts_num
                break

        input_data["scanned_pts"] = input_data["scanned_pts"][0].cpu().numpy().tolist()
        input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()
        result = {
            "pred_pose_9d_seq": input_data["scanned_n_to_world_pose_9d"],
            "pts_seq": input_data["scanned_pts"],
            "combined_scanned_pts": input_data["combined_scanned_pts"],
            "target_pts_seq": scanned_view_pts,
            "coverage_rate_seq": pred_cr_seq,
            "max_coverage_rate": data["max_coverage_rate"][0],
            "max_coverage_rate": data["seq_max_coverage_rate"],
            "pred_max_coverage_rate": max(pred_cr_seq),
            "scene_name": scene_name,
            "retry_no_pts_pose": retry_no_pts_pose,
            "retry_duplication_pose": retry_duplication_pose,
            "best_seq_len": data["best_seq_len"][0],
            "retry_overlap_pose": retry_overlap_pose,
            "best_seq_len": data["best_seq_len"],
        }
        self.stat_result[scene_name] = {
            "max_coverage_rate": data["max_coverage_rate"][0],
            "success_rate": max(pred_cr_seq) / data["max_coverage_rate"][0],
            "coverage_rate_seq": pred_cr_seq,
            "pred_max_coverage_rate": max(pred_cr_seq),
            "pred_seq_len": len(pred_cr_seq),
        }
        print('success rate: ', max(pred_cr_seq) / data["max_coverage_rate"][0])
        print('success rate: ', max(pred_cr_seq))

        return result

@@ -199,6 +264,13 @@ class Inferencer(Runner):
        down_sampled_combined_point_cloud = PtsUtil.voxel_downsample_point_cloud(combined_point_cloud, threshold)
        return ReconstructionUtil.compute_coverage_rate(model_pts, down_sampled_combined_point_cloud, threshold)

    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
        idx_sort = np.argsort(inverse)
        idx_unique = idx_sort[np.cumsum(counts) - counts]
        downsampled_points = point_cloud[idx_unique]
        return downsampled_points, inverse
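
# Worked sketch: with voxel_size=1.0, [0.2, 0.3, 0.1] and [0.4, 0.1, 0.2] share
# voxel (0, 0, 0) while [1.5, 0.2, 0.0] falls in (1, 0, 0); one representative
# per voxel is kept (which one depends on argsort order), and `inverse` maps
# every original point to its voxel's row.
#
#   pts = np.array([[0.2, 0.3, 0.1], [0.4, 0.1, 0.2], [1.5, 0.2, 0.0]])
#   down, inverse = self.voxel_downsample_with_mapping(pts, voxel_size=1.0)
#   # down: one point from voxel (0,0,0) plus [1.5, 0.2, 0.0]; inverse: [0, 0, 1]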

    def save_inference_result(self, dataset_name, scene_name, output):
        dataset_dir = os.path.join(self.output_dir, dataset_name)
@@ -206,7 +278,7 @@ class Inferencer(Runner):
            os.makedirs(dataset_dir)
        output_path = os.path.join(dataset_dir, f"{scene_name}.pkl")
        pickle.dump(output, open(output_path, "wb"))
        with open(os.path.join(dataset_dir, "stat.json"), "w") as f:
        with open(self.stat_result_path, "w") as f:
            json.dump(self.stat_result, f)
@@ -24,8 +24,6 @@ class DataLoadUtil:
        for channel in float_channels:
            channel_data = exr_file.channel(channel)
            img_data.append(np.frombuffer(channel_data, dtype=np.float16).reshape((height, width)))

        # combine the channels into a single (height, width, 3) RGB image
        img = np.stack(img_data, axis=-1)
        return img

11
utils/pts.py
@@ -17,6 +17,17 @@ class PtsUtil:
        unique_voxels = np.unique(voxel_indices, axis=0, return_inverse=True)
        return unique_voxels[0]*voxel_size

    @staticmethod
    def voxel_downsample_point_cloud_random(point_cloud, voxel_size=0.005, require_idx=False):
        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
        # keep one representative point per occupied voxel
        idx_sort = np.argsort(inverse)
        idx_unique = idx_sort[np.cumsum(counts)-counts]
        downsampled_points = point_cloud[idx_unique]
        if require_idx:
            return downsampled_points, inverse
        return downsampled_points

    @staticmethod
    def random_downsample_point_cloud(point_cloud, num_points, require_idx=False):
        if point_cloud.shape[0] == 0:

@@ -32,13 +32,15 @@ class ReconstructionUtil:

    @staticmethod
    def check_overlap(new_point_cloud, combined_point_cloud, overlap_area_threshold=25, voxel_size=0.01):
    def check_overlap(new_point_cloud, combined_point_cloud, overlap_area_threshold=25, voxel_size=0.01, require_new_added_pts_num=False):
        kdtree = cKDTree(combined_point_cloud)
        distances, _ = kdtree.query(new_point_cloud)
        overlapping_points = np.sum(distances < voxel_size*2)
        overlapping_points_num = np.sum(distances < voxel_size*2)
        cm = 0.01
        voxel_size_cm = voxel_size / cm
        overlap_area = overlapping_points * voxel_size_cm * voxel_size_cm
        overlap_area = overlapping_points_num * voxel_size_cm * voxel_size_cm
        if require_new_added_pts_num:
            return overlap_area > overlap_area_threshold, len(new_point_cloud) - np.sum(distances < voxel_size*1.2)
        return overlap_area > overlap_area_threshold
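
# Worked sketch: with voxel_size=0.01 (1 cm), voxel_size_cm = 1 and each
# overlapping point counts as 1 cm^2, so 30 new points lying within 2 cm of the
# combined cloud give overlap_area = 30 > 25 and the check passes.
#
#   ok, new_num = ReconstructionUtil.check_overlap(
#       new_pts, combined_pts, overlap_area_threshold=25,
#       voxel_size=0.01, require_new_added_pts_num=True)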
124
utils/render.py
@@ -1,16 +1,75 @@

import os
import json
import time
import subprocess
import tempfile
import shutil
import numpy as np
from utils.data_load import DataLoadUtil
from utils.reconstruction import ReconstructionUtil
from utils.pts import PtsUtil

class RenderUtil:
    target_mask_label = (0, 255, 0)
    display_table_mask_label = (0, 0, 255)
    random_downsample_N = 32768
    min_z = 0.2
    max_z = 0.5

    @staticmethod
    def render_pts(cam_pose, scene_path, script_path, model_points_normals, voxel_threshold=0.005, filter_degree=75, nO_to_nL_pose=None, require_full_scene=False):
    def get_world_points_and_normal(depth, mask, normal, cam_intrinsic, cam_extrinsic, random_downsample_N):
        z = depth[mask]
        i, j = np.nonzero(mask)
        x = (j - cam_intrinsic[0, 2]) * z / cam_intrinsic[0, 0]
        y = (i - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]

        points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
        normal_camera = normal[mask].reshape(-1, 3)
        sampled_target_points, idx = PtsUtil.random_downsample_point_cloud(
            points_camera, random_downsample_N, require_idx=True
        )
        if len(sampled_target_points) == 0:
            return np.zeros((0, 3)), np.zeros((0, 3))
        sampled_normal_camera = normal_camera[idx]

        points_camera_aug = np.concatenate((sampled_target_points, np.ones((sampled_target_points.shape[0], 1))), axis=-1)
        points_camera_world = np.dot(cam_extrinsic, points_camera_aug.T).T[:, :3]

        return points_camera_world, sampled_normal_camera

    @staticmethod
    def get_world_points(depth, mask, cam_intrinsic, cam_extrinsic, random_downsample_N):
        z = depth[mask]
        i, j = np.nonzero(mask)
        x = (j - cam_intrinsic[0, 2]) * z / cam_intrinsic[0, 0]
        y = (i - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]

        points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
        sampled_target_points = PtsUtil.random_downsample_point_cloud(
            points_camera, random_downsample_N
        )
        points_camera_aug = np.concatenate((sampled_target_points, np.ones((sampled_target_points.shape[0], 1))), axis=-1)
        points_camera_world = np.dot(cam_extrinsic, points_camera_aug.T).T[:, :3]

        return points_camera_world
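
# Back-projection sketch: a masked pixel (i, j) with depth z maps to camera
# space via x = (j - cx) * z / fx, y = (i - cy) * z / fy, then to world space
# through the 4x4 extrinsic. E.g. with fx = fy = 500, cx = cy = 320, the pixel
# (i=320, j=420) at z = 1.0 gives (0.2, 0.0, 1.0) in camera coordinates.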

    @staticmethod
    def get_scan_points_indices(scan_points, mask, display_table_mask_label, cam_intrinsic, cam_extrinsic):
        scan_points_homogeneous = np.hstack((scan_points, np.ones((scan_points.shape[0], 1))))
        points_camera = np.dot(np.linalg.inv(cam_extrinsic), scan_points_homogeneous.T).T[:, :3]
        points_image_homogeneous = np.dot(cam_intrinsic, points_camera.T).T
        points_image_homogeneous /= points_image_homogeneous[:, 2:]
        pixel_x = points_image_homogeneous[:, 0].astype(int)
        pixel_y = points_image_homogeneous[:, 1].astype(int)
        h, w = mask.shape[:2]
        valid_indices = (pixel_x >= 0) & (pixel_x < w) & (pixel_y >= 0) & (pixel_y < h)
        mask_colors = mask[pixel_y[valid_indices], pixel_x[valid_indices]]
        selected_points_indices = np.where((mask_colors == display_table_mask_label).all(axis=-1))[0]
        selected_points_indices = np.where(valid_indices)[0][selected_points_indices]
        return selected_points_indices

    @staticmethod
    def render_pts(cam_pose, scene_path, script_path, scan_points, voxel_threshold=0.005, filter_degree=75, nO_to_nL_pose=None, require_full_scene=False):

        nO_to_world_pose = DataLoadUtil.get_real_cam_O_from_cam_L(cam_pose, nO_to_nL_pose, scene_path=scene_path)

@@ -26,27 +85,52 @@ class RenderUtil:
        with open(params_data_path, 'w') as f:
            json.dump(params, f)
        result = subprocess.run([
            'blender', '-b', '-P', script_path, '--', temp_dir
            '/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', script_path, '--', temp_dir
        ], capture_output=True, text=True)
        if result.returncode != 0:
            print("Blender script failed:")
            print(result.stderr)
            return None
        #print(result)
        #import ipdb; ipdb.set_trace()
        path = os.path.join(temp_dir, "tmp")
        point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(path, binocular=True)
        cam_params = DataLoadUtil.load_cam_info(path, binocular=True)
        cam_info = DataLoadUtil.load_cam_info(path, binocular=True)
        depth_L, depth_R = DataLoadUtil.load_depth(
            path, cam_info["near_plane"],
            cam_info["far_plane"],
            binocular=True
        )
        mask_L, mask_R = DataLoadUtil.load_seg(path, binocular=True)
        normal_L = DataLoadUtil.load_normal(path, binocular=True, left_only=True)
        ''' target points '''
        mask_img_L = mask_L
        mask_img_R = mask_R

        ''' TODO: old code: filter_points api is changed, need to update the code '''
        filtered_point_cloud = PtsUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=filter_degree)
        full_scene_point_cloud = None
        if require_full_scene:
            depth_L, depth_R = DataLoadUtil.load_depth(path, cam_params['near_plane'], cam_params['far_plane'], binocular=True)
            point_cloud_L = DataLoadUtil.get_point_cloud(depth_L, cam_params['cam_intrinsic'], cam_params['cam_to_world'])['points_world']
            point_cloud_R = DataLoadUtil.get_point_cloud(depth_R, cam_params['cam_intrinsic'], cam_params['cam_to_world_R'])['points_world']

            point_cloud_L = PtsUtil.random_downsample_point_cloud(point_cloud_L, 65536)
            point_cloud_R = PtsUtil.random_downsample_point_cloud(point_cloud_R, 65536)
            full_scene_point_cloud = PtsUtil.get_overlapping_points(point_cloud_L, point_cloud_R)
        target_mask_img_L = (mask_L == RenderUtil.target_mask_label).all(axis=-1)
        target_mask_img_R = (mask_R == RenderUtil.target_mask_label).all(axis=-1)

        return filtered_point_cloud, full_scene_point_cloud
        sampled_target_points_L, sampled_target_normal_L = RenderUtil.get_world_points_and_normal(depth_L, target_mask_img_L, normal_L, cam_info["cam_intrinsic"], cam_info["cam_to_world"], RenderUtil.random_downsample_N)
        sampled_target_points_R = RenderUtil.get_world_points(depth_R, target_mask_img_R, cam_info["cam_intrinsic"], cam_info["cam_to_world_R"], RenderUtil.random_downsample_N)

        has_points = sampled_target_points_L.shape[0] > 0 and sampled_target_points_R.shape[0] > 0
        if has_points:
            target_points, overlap_idx = PtsUtil.get_overlapping_points(
                sampled_target_points_L, sampled_target_points_R, voxel_threshold, require_idx=True
            )
            sampled_target_normal_L = sampled_target_normal_L[overlap_idx]

        if has_points:
            has_points = target_points.shape[0] > 0

        if has_points:
            target_points, target_normals = PtsUtil.filter_points(
                target_points, sampled_target_normal_L, cam_info["cam_to_world"], theta_limit=filter_degree, z_range=(RenderUtil.min_z, RenderUtil.max_z)
            )

        scan_points_indices_L = RenderUtil.get_scan_points_indices(scan_points, mask_img_L, RenderUtil.display_table_mask_label, cam_info["cam_intrinsic"], cam_info["cam_to_world"])
        scan_points_indices_R = RenderUtil.get_scan_points_indices(scan_points, mask_img_R, RenderUtil.display_table_mask_label, cam_info["cam_intrinsic"], cam_info["cam_to_world_R"])
        scan_points_indices = np.intersect1d(scan_points_indices_L, scan_points_indices_R)
        if not has_points:
            target_points = np.zeros((0, 3))
            target_normals = np.zeros((0, 3))
        #import ipdb; ipdb.set_trace()
        return target_points, target_normals, scan_points_indices

16
utils/vis.py
@@ -7,6 +7,7 @@ import trimesh
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.data_load import DataLoadUtil
from utils.pts import PtsUtil
from utils.pose import PoseUtil

class visualizeUtil:

@@ -34,6 +35,21 @@ class visualizeUtil:
        np.savetxt(os.path.join(output_dir, "all_cam_pos.txt"), all_cam_pos)
        np.savetxt(os.path.join(output_dir, "all_cam_axis.txt"), all_cam_axis)

    @staticmethod
    def get_cam_pose_and_cam_axis(cam_pose, is_6d_pose):
        if is_6d_pose:
            matrix_cam_pose = np.eye(4)
            matrix_cam_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(cam_pose[:6])
            matrix_cam_pose[:3, 3] = cam_pose[6:]
        else:
            matrix_cam_pose = cam_pose
        cam_pos = matrix_cam_pose[:3, 3]
        cam_axis = matrix_cam_pose[:3, 2]
        num_samples = 10
        # sample points along the viewing axis at 2 cm spacing for visualization
        sample_points = [cam_pos + 0.02 * t * cam_axis for t in range(num_samples)]
        sample_points = np.array(sample_points)
        return cam_pos, sample_points

    @staticmethod
    def save_all_combined_pts(root, scene, output_dir):
        length = DataLoadUtil.get_scene_seq_length(root, scene)