4 Commits

SHA1        Message                         Date
1123e69bff  fix nan                         2024-10-31 12:02:48 +00:00
5e8684d149  debug                           2024-10-31 11:13:37 +00:00
96fa40cc35  global_and_partial_global: upd  2024-10-30 15:34:15 +00:00
b82b92eebb  global_and_partial_global: all  2024-10-30 11:49:45 +00:00
25 changed files with 450 additions and 1933 deletions

View File

@@ -1,6 +1,5 @@
 from PytorchBoot.application import PytorchBootApplication
 from runners.inferencer import Inferencer
-from runners.inference_server import InferencerServer

 @PytorchBootApplication("inference")
 class InferenceApp:
@@ -15,17 +14,3 @@ class InferenceApp:
         Evaluator("path_to_your_eval_config").run()
         '''
         Inferencer("./configs/local/inference_config.yaml").run()
-
-@PytorchBootApplication("server")
-class InferenceServerApp:
-    @staticmethod
-    def start():
-        '''
-        call default or your custom runners here, code will be executed
-        automatically when type "pytorch-boot run" or "ptb run" in terminal
-        example:
-            Trainer("path_to_your_train_config").run()
-            Evaluator("path_to_your_eval_config").run()
-        '''
-        InferencerServer("./configs/server/server_inference_server_config.yaml").run()

View File

@@ -1,162 +0,0 @@
import numpy as np
from sklearn.cluster import DBSCAN
class PredictResult:
def __init__(self, raw_predict_result, input_pts=None, cluster_params=dict(eps=0.5, min_samples=2)):
self.input_pts = input_pts
self.cluster_params = cluster_params
self.sampled_9d_pose = raw_predict_result
self.sampled_matrix_pose = self.get_sampled_matrix_pose()
self.distance_matrix = self.calculate_distance_matrix()
self.clusters = self.get_cluster_result()
self.candidate_matrix_poses = self.get_candidate_poses()
self.candidate_9d_poses = [np.concatenate((self.matrix_to_rotation_6d_numpy(matrix[:3,:3]), matrix[:3,3].reshape(-1,)), axis=-1) for matrix in self.candidate_matrix_poses]
self.cluster_num = len(self.clusters)
@staticmethod
def rotation_6d_to_matrix_numpy(d6):
a1, a2 = d6[:3], d6[3:]
b1 = a1 / np.linalg.norm(a1)
b2 = a2 - np.dot(b1, a2) * b1
b2 = b2 / np.linalg.norm(b2)
b3 = np.cross(b1, b2)
return np.stack((b1, b2, b3), axis=-2)
@staticmethod
def matrix_to_rotation_6d_numpy(matrix):
return np.copy(matrix[:2, :]).reshape((6,))
def __str__(self):
info = "Predict Result:\n"
info += f" Predicted pose number: {len(self.sampled_9d_pose)}\n"
info += f" Cluster number: {self.cluster_num}\n"
for i, cluster in enumerate(self.clusters):
info += f" - Cluster {i} size: {len(cluster)}\n"
max_distance = np.max(self.distance_matrix[self.distance_matrix != 0])
min_distance = np.min(self.distance_matrix[self.distance_matrix != 0])
info += f" Max distance: {max_distance}\n"
info += f" Min distance: {min_distance}\n"
return info
def get_sampled_matrix_pose(self):
sampled_matrix_pose = []
for pose in self.sampled_9d_pose:
rotation = pose[:6]
translation = pose[6:]
pose = self.rotation_6d_to_matrix_numpy(rotation)
pose = np.concatenate((pose, translation.reshape(-1, 1)), axis=-1)
pose = np.concatenate((pose, np.array([[0, 0, 0, 1]])), axis=-2)
sampled_matrix_pose.append(pose)
return np.array(sampled_matrix_pose)
def rotation_distance(self, R1, R2):
R = np.dot(R1.T, R2)
trace = np.trace(R)
angle = np.arccos(np.clip((trace - 1) / 2, -1, 1))
return angle
def calculate_distance_matrix(self):
n = len(self.sampled_matrix_pose)
dist_matrix = np.zeros((n, n))
for i in range(n):
for j in range(n):
dist_matrix[i, j] = self.rotation_distance(self.sampled_matrix_pose[i][:3, :3], self.sampled_matrix_pose[j][:3, :3])
return dist_matrix
def cluster_rotations(self):
clustering = DBSCAN(eps=self.cluster_params['eps'], min_samples=self.cluster_params['min_samples'], metric='precomputed')
labels = clustering.fit_predict(self.distance_matrix)
return labels
def get_cluster_result(self):
labels = self.cluster_rotations()
cluster_num = len(set(labels)) - (1 if -1 in labels else 0)
clusters = []
for _ in range(cluster_num):
clusters.append([])
for matrix_pose, label in zip(self.sampled_matrix_pose, labels):
if label != -1:
clusters[label].append(matrix_pose)
clusters.sort(key=len, reverse=True)
return clusters
def get_center_matrix_pose_from_cluster(self, cluster):
min_total_distance = float('inf')
center_matrix_pose = None
for matrix_pose in cluster:
total_distance = 0
for other_matrix_pose in cluster:
rot_distance = self.rotation_distance(matrix_pose[:3, :3], other_matrix_pose[:3, :3])
total_distance += rot_distance
if total_distance < min_total_distance:
min_total_distance = total_distance
center_matrix_pose = matrix_pose
return center_matrix_pose
def get_candidate_poses(self):
candidate_poses = []
for cluster in self.clusters:
candidate_poses.append(self.get_center_matrix_pose_from_cluster(cluster))
return candidate_poses
def visualize(self):
import plotly.graph_objects as go
fig = go.Figure()
if self.input_pts is not None:
fig.add_trace(go.Scatter3d(
x=self.input_pts[:, 0], y=self.input_pts[:, 1], z=self.input_pts[:, 2],
mode='markers', marker=dict(size=1, color='gray', opacity=0.5), name='Input Points'
))
colors = ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg']
for i, cluster in enumerate(self.clusters):
color = colors[i]
candidate_pose = self.candidate_matrix_poses[i]
origin_candidate = candidate_pose[:3, 3]
z_axis_candidate = candidate_pose[:3, 2]
for pose in cluster:
origin = pose[:3, 3]
z_axis = pose[:3, 2]
fig.add_trace(go.Cone(
x=[origin[0]], y=[origin[1]], z=[origin[2]],
u=[z_axis[0]], v=[z_axis[1]], w=[z_axis[2]],
colorscale=color,
sizemode="absolute", sizeref=0.05, anchor="tail", showscale=False
))
fig.add_trace(go.Cone(
x=[origin_candidate[0]], y=[origin_candidate[1]], z=[origin_candidate[2]],
u=[z_axis_candidate[0]], v=[z_axis_candidate[1]], w=[z_axis_candidate[2]],
colorscale=color,
sizemode="absolute", sizeref=0.1, anchor="tail", showscale=False
))
fig.update_layout(
title="Clustered Poses and Input Points",
scene=dict(
xaxis_title='X',
yaxis_title='Y',
zaxis_title='Z'
),
margin=dict(l=0, r=0, b=0, t=40),
scene_camera=dict(eye=dict(x=1.25, y=1.25, z=1.25))
)
fig.show()
if __name__ == "__main__":
step = 0
raw_predict_result = np.load(f"inference_result_pack/inference_result_pack/{step}/all_pred_pose_9d.npy")
input_pts = np.loadtxt(f"inference_result_pack/inference_result_pack/{step}/input_pts.txt")
print(raw_predict_result.shape)
predict_result = PredictResult(raw_predict_result, input_pts, cluster_params=dict(eps=0.25, min_samples=3))
print(predict_result)
print(len(predict_result.candidate_matrix_poses))
print(predict_result.distance_matrix)
#import ipdb; ipdb.set_trace()
predict_result.visualize()
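The geodesic rotation distance above is what both the DBSCAN clustering and the cluster-center selection operate on. A minimal standalone sanity check of that formula (NumPy only; the 0.3 rad test rotation is illustrative):

import numpy as np

def rotation_distance(R1, R2):
    # Geodesic distance on SO(3): the angle of the relative rotation R1^T @ R2,
    # recovered from trace(R) = 1 + 2*cos(angle).
    R = np.dot(R1.T, R2)
    return np.arccos(np.clip((np.trace(R) - 1) / 2, -1.0, 1.0))

theta = 0.3  # rotate 0.3 rad about Z; distance to the identity should be 0.3
Rz = np.array([
    [np.cos(theta), -np.sin(theta), 0.0],
    [np.sin(theta),  np.cos(theta), 0.0],
    [0.0,            0.0,           1.0],
])
assert np.isclose(rotation_distance(np.eye(3), Rz), theta)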

View File

@@ -1,76 +1,76 @@
 runner:
   general:
-    seed: 0
+    seed: 1
     device: cuda
     cuda_visible_devices: "0,1,2,3,4,5,6,7"

   experiment:
-    name: train_ab_global_only_dense
+    name: w_gf_wo_lf_full
     root_dir: "experiments"
-    epoch: 441 # -1 stands for last epoch
+    epoch: 1 # -1 stands for last epoch

   test:
     dataset_list:
-      - OmniObject3d_test
+      - OmniObject3d_train

     blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py"
-    output_dir: "/media/hofee/data/data/p++_dense"
-    pipeline: nbv_reconstruction_pipeline
-    voxel_size: 0.003
-    min_new_area: 1.0
+    output_dir: "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/test/inference_global_full_on_testset"
+    pipeline: nbv_reconstruction_global_pts_pipeline

 dataset:
-  # OmniObject3d_train:
-  #   root_dir: "C:\\Document\\Datasets\\inference_test1"
-  #   model_dir: "C:\\Document\\Datasets\\scaled_object_meshes"
-  #   source: seq_reconstruction_dataset_preprocessed
-  #   split_file: "C:\\Document\\Datasets\\data_list\\sample.txt"
-  #   type: test
-  #   filter_degree: 75
-  #   ratio: 1
-  #   batch_size: 1
-  #   num_workers: 12
-  #   pts_num: 8192
-  #   load_from_preprocess: True
-  OmniObject3d_test:
-    root_dir: "/media/hofee/data/data/new_testset_output"
+  OmniObject3d_train:
+    root_dir: "/media/hofee/repository/nbv_reconstruction_data_512"
     model_dir: "/media/hofee/data/data/scaled_object_meshes"
-    source: seq_reconstruction_dataset_preprocessed
-    # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt"
+    source: seq_nbv_reconstruction_dataset
+    split_file: "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/test/test_set_list.txt"
     type: test
     filter_degree: 75
-    eval_list:
-      - pose_diff
-      - coverage_rate_increase
-    ratio: 0.1
+    ratio: 1
     batch_size: 1
     num_workers: 12
-    pts_num: 8192
-    load_from_preprocess: True
+    pts_num: 4096
+    load_from_preprocess: False

 pipeline:
-  nbv_reconstruction_pipeline:
+  nbv_reconstruction_local_pts_pipeline:
     modules:
-      pts_encoder: pointnet++_encoder
+      pts_encoder: pointnet_encoder
       seq_encoder: transformer_seq_encoder
       pose_encoder: pose_encoder
       view_finder: gf_view_finder
     eps: 1e-5
+    global_scanned_feat: False
+
+  nbv_reconstruction_global_pts_pipeline:
+    modules:
+      pts_encoder: pointnet_encoder
+      pose_seq_encoder: transformer_pose_seq_encoder
+      pose_encoder: pose_encoder
+      view_finder: gf_view_finder
+    eps: 1e-5
     global_scanned_feat: True

 module:
-  pointnet++_encoder:
-    in_dim: 3
-    params_name: light
   pointnet_encoder:
     in_dim: 3
     out_dim: 1024
     global_feat: True
     feature_transform: False

   transformer_seq_encoder:
-    embed_dim: 256
+    pts_embed_dim: 1024
+    pose_embed_dim: 256
+    num_heads: 4
+    ffn_dim: 256
+    num_layers: 3
+    output_dim: 2048
+
+  transformer_pose_seq_encoder:
+    pose_embed_dim: 256
     num_heads: 4
     ffn_dim: 256
     num_layers: 3
@@ -86,8 +86,7 @@ module:
     sample_mode: ode
     sampling_steps: 500
     sde_mode: ve

   pose_encoder:
     pose_dim: 9
     out_dim: 256
-
-  pts_num_encoder:
-    out_dim: 64
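For orientation, runners and datasets read these YAML blocks through PytorchBoot's ConfigManager, with nested keys passed positionally; a small sketch of the access pattern, based only on the calls visible in the dataset code later in this changeset (it assumes the PytorchBoot runtime has already loaded the config):

from PytorchBoot.config import ConfigManager

# Same pattern the dataset uses to locate the experiment cache directory.
expr_root = ConfigManager.get("runner", "experiment", "root_dir")
expr_name = ConfigManager.get("runner", "experiment", "name")
print(expr_root, expr_name)  # e.g. "experiments", "w_gf_wo_lf_full"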

View File

@@ -15,13 +15,13 @@ runner:
     overlap_area_threshold: 30
     compute_with_normal: False
     scan_points_threshold: 10
     overwrite: False
     seq_num: 10
     dataset_list:
       - OmniObject3d

 datasets:
   OmniObject3d:
-    root_dir: /media/hofee/data/data/test_bottle/view
-    from: 0
-    to: -1 # ..-1 means end
+    root_dir: /data/hofee/nbv_rec_part2_preprocessed
+    from: 155
+    to: 165 # ..-1 means end

View File

@@ -8,11 +8,11 @@ runner:
     root_dir: experiments
   generate:
     port: 5002
-    from: 0
-    to: 50 # -1 means all
-    object_dir: /media/hofee/data/data/test_bottle/bottle_mesh
-    table_model_path: /media/hofee/data/data/others/table.obj
-    output_dir: /media/hofee/data/data/test_bottle/view
+    from: 600
+    to: -1 # -1 means all
+    object_dir: /media/hofee/data/data/object_meshes_part1
+    table_model_path: "/media/hofee/data/data/others/table.obj"
+    output_dir: /media/hofee/repository/data_part_1
     binocular_vision: true
     plane_size: 10
     max_views: 512
@@ -34,7 +34,7 @@ runner:
     max_y: 0.05
     min_z: 0.01
     max_z: 0.01
-    random_rotation_ratio: 0.0
+    random_rotation_ratio: 0.3
   random_objects:
     num: 4
     cluster: 0.9

View File

@@ -1,53 +0,0 @@
runner:
general:
seed: 0
device: cuda
cuda_visible_devices: "0,1,2,3,4,5,6,7"
experiment:
name: train_ab_global_only
root_dir: "experiments"
epoch: -1 # -1 stands for last epoch
pipeline: nbv_reconstruction_pipeline
voxel_size: 0.003
pipeline:
nbv_reconstruction_pipeline:
modules:
pts_encoder: pointnet_encoder
seq_encoder: transformer_seq_encoder
pose_encoder: pose_encoder
view_finder: gf_view_finder
eps: 1e-5
global_scanned_feat: True
module:
pointnet_encoder:
in_dim: 3
out_dim: 1024
global_feat: True
feature_transform: False
transformer_seq_encoder:
embed_dim: 256
num_heads: 4
ffn_dim: 256
num_layers: 3
output_dim: 1024
gf_view_finder:
t_feat_dim: 128
pose_feat_dim: 256
main_feat_dim: 2048
regression_head: Rx_Ry_and_T
pose_mode: rot_matrix
per_point_feature: False
sample_mode: ode
sampling_steps: 500
sde_mode: ve
pose_encoder:
pose_dim: 9
out_dim: 256
pts_num_encoder:
out_dim: 64

View File

@@ -6,17 +6,17 @@ runner:
     cuda_visible_devices: "0,1,2,3,4,5,6,7"

   experiment:
-    name: debug
+    name: server_split_dataset
     root_dir: "experiments"

   split: #
-    root_dir: "/data/hofee/data/packed_preprocessed_data"
+    root_dir: "/data/hofee/data/new_full_data"
     type: "unseen_instance" # "unseen_category"
     datasets:
       OmniObject3d_train:
-        path: "/data/hofee/data/OmniObject3d_train.txt"
+        path: "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt"
         ratio: 0.9

       OmniObject3d_test:
-        path: "/data/hofee/data/OmniObject3d_test.txt"
+        path: "/data/hofee/data/new_full_data_list/OmniObject3d_test.txt"
         ratio: 0.1

View File

@@ -3,11 +3,11 @@ runner:
   general:
     seed: 0
     device: cuda
-    cuda_visible_devices: "0"
+    cuda_visible_devices: "1"
     parallel: False

   experiment:
-    name: train_ab_global_only_with_wp_p++_strong
+    name: train_ab_global_and_partial_global
     root_dir: "experiments"
     use_checkpoint: False
     epoch: -1 # -1 stands for last epoch
@@ -19,13 +19,13 @@ runner:
   optimizer:
     type: Adam
     lr: 0.0001
   losses:
     - gf_loss
   dataset: OmniObject3d_train
   test:
     frequency: 3 # test frequency
     dataset_list:
-      - OmniObject3d_test
+      #- OmniObject3d_test
       - OmniObject3d_val
   pipeline: nbv_reconstruction_pipeline
@@ -39,7 +39,7 @@ dataset:
     type: train
     cache: True
     ratio: 1
-    batch_size: 64
+    batch_size: 80
     num_workers: 128
     pts_num: 8192
     load_from_preprocess: True
@@ -80,7 +80,7 @@ dataset:
 pipeline:
   nbv_reconstruction_pipeline:
     modules:
-      pts_encoder: pointnet++_encoder
+      pts_encoder: pointnet_encoder
       seq_encoder: transformer_seq_encoder
       pose_encoder: pose_encoder
       view_finder: gf_view_finder
@@ -96,12 +96,8 @@ module:
     global_feat: True
     feature_transform: False

-  pointnet++_encoder:
-    in_dim: 3
-    params_name: strong
-
   transformer_seq_encoder:
-    embed_dim: 256
+    embed_dim: 320
     num_heads: 4
     ffn_dim: 256
     num_layers: 3
@@ -110,7 +106,7 @@ module:
   gf_view_finder:
     t_feat_dim: 128
     pose_feat_dim: 256
-    main_feat_dim: 5120
+    main_feat_dim: 2048
     regression_head: Rx_Ry_and_T
     pose_mode: rot_matrix
     per_point_feature: False
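One sanity check on the main_feat_dim change: get_main_feat concatenates the per-sequence feature with the global point feature, so main_feat_dim must be their sum. Under the assumption (taken from the other configs in this changeset, not from this hunk) that both the transformer_seq_encoder output_dim and the pointnet_encoder out_dim are 1024:

# Assumed dims; output_dim is not shown in this hunk.
seq_encoder_output_dim = 1024  # transformer_seq_encoder.output_dim
pointnet_out_dim = 1024        # module.pointnet_encoder.out_dim
# main_feat = torch.cat([seq_feat, global_scanned_feat], dim=-1)
assert seq_encoder_output_dim + pointnet_out_dim == 2048  # gf_view_finder.main_feat_dim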

View File

@@ -4,10 +4,10 @@ import PytorchBoot.namespace as namespace
 import PytorchBoot.stereotype as stereotype
 from PytorchBoot.config import ConfigManager
 from PytorchBoot.utils.log_util import Log
 import torch
 import os
 import sys
+import time

 sys.path.append(r"/data/hofee/project/nbv_rec/nbv_reconstruction")
@@ -51,7 +51,7 @@ class NBVReconstructionDataset(BaseDataset):
                 scene_name_list.append(scene_name)
         return scene_name_list

-    def get_datalist(self, bias=False):
+    def get_datalist(self):
         datalist = []
         for scene_name in self.scene_name_list:
             seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
@@ -80,18 +80,16 @@ class NBVReconstructionDataset(BaseDataset):
             for data_pair in label_data["data_pairs"]:
                 scanned_views = data_pair[0]
                 next_best_view = data_pair[1]
-                accept_probability = scanned_views[-1][1]
-                if accept_probability > np.random.rand():
-                    datalist.append(
-                        {
-                            "scanned_views": scanned_views,
-                            "next_best_view": next_best_view,
-                            "seq_max_coverage_rate": max_coverage_rate,
-                            "scene_name": scene_name,
-                            "label_idx": seq_idx,
-                            "scene_max_coverage_rate": scene_max_coverage_rate,
-                        }
-                    )
+                datalist.append(
+                    {
+                        "scanned_views": scanned_views,
+                        "next_best_view": next_best_view,
+                        "seq_max_coverage_rate": max_coverage_rate,
+                        "scene_name": scene_name,
+                        "label_idx": seq_idx,
+                        "scene_max_coverage_rate": scene_max_coverage_rate,
+                    }
+                )
         return datalist

     def preprocess_cache(self):
@@ -117,8 +115,13 @@ class NBVReconstructionDataset(BaseDataset):
             except Exception as e:
                 Log.error(f"Save cache failed: {e}")

-    def voxel_downsample_with_mask(self, pts, voxel_size):
-        pass
+    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
+        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
+        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
+        idx_sort = np.argsort(inverse)
+        idx_unique = idx_sort[np.cumsum(counts)-counts]
+        downsampled_points = point_cloud[idx_unique]
+        return downsampled_points, inverse

     def __getitem__(self, index):
@@ -132,6 +135,9 @@ class NBVReconstructionDataset(BaseDataset):
             scanned_coverages_rate,
             scanned_n_to_world_pose,
         ) = ([], [], [])
+        start_time = time.time()
+        start_indices = [0]
+        total_points = 0
         for view in scanned_views:
             frame_idx = view[0]
             coverage_rate = view[1]
@@ -153,8 +159,12 @@ class NBVReconstructionDataset(BaseDataset):
             n_to_world_trans = n_to_world_pose[:3, 3]
             n_to_world_9d = np.concatenate([n_to_world_6d, n_to_world_trans], axis=0)
             scanned_n_to_world_pose.append(n_to_world_9d)
+            total_points += len(downsampled_target_point_cloud)
+            start_indices.append(total_points)
+        end_time = time.time()
+        #Log.info(f"load data time: {end_time - start_time}")

         nbv_idx, nbv_coverage_rate = nbv[0], nbv[1]
         nbv_path = DataLoadUtil.get_path(self.root_dir, scene_name, nbv_idx)
         cam_info = DataLoadUtil.load_cam_info(nbv_path)
@@ -167,14 +177,27 @@ class NBVReconstructionDataset(BaseDataset):
         best_to_world_9d = np.concatenate(
             [best_to_world_6d, best_to_world_trans], axis=0
         )
         combined_scanned_views_pts = np.concatenate(scanned_views_pts, axis=0)
-        voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_views_pts, 0.002)
-        random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, self.pts_num)
+        voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_views_pts, 0.003)
+        random_downsampled_combined_scanned_pts_np, random_downsample_idx = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, self.pts_num, require_idx=True)
+        all_idx_unique = np.arange(len(voxel_downsampled_combined_scanned_pts_np))
+        all_random_downsample_idx = all_idx_unique[random_downsample_idx]
+        scanned_pts_mask = []
+        for idx, start_idx in enumerate(start_indices):
+            if idx == len(start_indices) - 1:
+                break
+            end_idx = start_indices[idx+1]
+            view_inverse = inverse[start_idx:end_idx]
+            view_unique_downsampled_idx = np.unique(view_inverse)
+            view_unique_downsampled_idx_set = set(view_unique_downsampled_idx)
+            mask = np.array([idx in view_unique_downsampled_idx_set for idx in all_random_downsample_idx])
+            scanned_pts_mask.append(mask)

         data_item = {
             "scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32), # Ndarray(S x Nv x 3)
             "combined_scanned_pts": np.asarray(random_downsampled_combined_scanned_pts_np, dtype=np.float32), # Ndarray(N x 3)
+            "scanned_pts_mask": np.asarray(scanned_pts_mask, dtype=np.bool), # Ndarray(N)
             "scanned_coverage_rate": scanned_coverages_rate, # List(S): Float, range(0, 1)
             "scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32), # Ndarray(S x 9)
             "best_coverage_rate": nbv_coverage_rate, # Float, range(0, 1)
@@ -200,7 +223,9 @@ class NBVReconstructionDataset(BaseDataset):
         collate_data["scanned_n_to_world_pose_9d"] = [
             torch.tensor(item["scanned_n_to_world_pose_9d"]) for item in batch
         ]
+        collate_data["scanned_pts_mask"] = [
+            torch.tensor(item["scanned_pts_mask"]) for item in batch
+        ]

         ''' ------ Fixed Length ------ '''
         collate_data["best_to_world_pose_9d"] = torch.stack(
@@ -209,12 +234,14 @@ class NBVReconstructionDataset(BaseDataset):
         collate_data["combined_scanned_pts"] = torch.stack(
             [torch.tensor(item["combined_scanned_pts"]) for item in batch]
         )

         for key in batch[0].keys():
             if key not in [
                 "scanned_pts",
                 "scanned_n_to_world_pose_9d",
                 "best_to_world_pose_9d",
                 "combined_scanned_pts",
+                "scanned_pts_mask",
             ]:
                 collate_data[key] = [item[key] for item in batch]
         return collate_data
@@ -230,10 +257,9 @@ if __name__ == "__main__":
     torch.manual_seed(seed)
     np.random.seed(seed)
     config = {
-        "root_dir": "/data/hofee/data/new_full_data",
-        "model_dir": "../data/scaled_object_meshes",
+        "root_dir": "/data/hofee/nbv_rec_part2_preprocessed",
         "source": "nbv_reconstruction_dataset",
-        "split_file": "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt",
+        "split_file": "/data/hofee/data/sample.txt",
         "load_from_preprocess": True,
         "ratio": 0.5,
         "batch_size": 2,

View File

@@ -1,154 +0,0 @@
import numpy as np
from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.utils.log_util import Log
import torch
import os
import sys
sys.path.append(r"/home/data/hofee/project/nbv_rec/nbv_reconstruction")
from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil
@stereotype.dataset("old_seq_nbv_reconstruction_dataset")
class SeqNBVReconstructionDataset(BaseDataset):
def __init__(self, config):
super(SeqNBVReconstructionDataset, self).__init__(config)
self.type = config["type"]
if self.type != namespace.Mode.TEST:
Log.error("Dataset <seq_nbv_reconstruction_dataset> Only support test mode", terminate=True)
self.config = config
self.root_dir = config["root_dir"]
self.split_file_path = config["split_file"]
self.scene_name_list = self.load_scene_name_list()
self.datalist = self.get_datalist()
self.pts_num = config["pts_num"]
self.model_dir = config["model_dir"]
self.filter_degree = config["filter_degree"]
self.load_from_preprocess = config.get("load_from_preprocess", False)
def load_scene_name_list(self):
scene_name_list = []
with open(self.split_file_path, "r") as f:
for line in f:
scene_name = line.strip()
scene_name_list.append(scene_name)
return scene_name_list
def get_datalist(self):
datalist = []
for scene_name in self.scene_name_list:
seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
scene_max_coverage_rate = 0
scene_max_cr_idx = 0
for seq_idx in range(seq_num):
label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, seq_idx)
label_data = DataLoadUtil.load_label(label_path)
max_coverage_rate = label_data["max_coverage_rate"]
if max_coverage_rate > scene_max_coverage_rate:
scene_max_coverage_rate = max_coverage_rate
scene_max_cr_idx = seq_idx
label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, scene_max_cr_idx)
label_data = DataLoadUtil.load_label(label_path)
first_frame = label_data["best_sequence"][0]
best_seq_len = len(label_data["best_sequence"])
datalist.append({
"scene_name": scene_name,
"first_frame": first_frame,
"max_coverage_rate": scene_max_coverage_rate,
"best_seq_len": best_seq_len,
"label_idx": scene_max_cr_idx,
})
return datalist
def __getitem__(self, index):
data_item_info = self.datalist[index]
first_frame_idx = data_item_info["first_frame"][0]
first_frame_coverage = data_item_info["first_frame"][1]
max_coverage_rate = data_item_info["max_coverage_rate"]
scene_name = data_item_info["scene_name"]
first_cam_info = DataLoadUtil.load_cam_info(DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx), binocular=True)
first_view_path = DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx)
first_left_cam_pose = first_cam_info["cam_to_world"]
first_center_cam_pose = first_cam_info["cam_to_world_O"]
first_target_point_cloud = DataLoadUtil.load_from_preprocessed_pts(first_view_path)
first_pts_num = first_target_point_cloud.shape[0]
first_downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(first_target_point_cloud, self.pts_num)
first_to_world_rot_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(first_left_cam_pose[:3,:3]))
first_to_world_trans = first_left_cam_pose[:3,3]
first_to_world_9d = np.concatenate([first_to_world_rot_6d, first_to_world_trans], axis=0)
diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
voxel_threshold = diag*0.02
first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
scene_path = os.path.join(self.root_dir, scene_name)
model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
data_item = {
"first_pts_num": np.asarray(
first_pts_num, dtype=np.int32
),
"first_pts": np.asarray([first_downsampled_target_point_cloud],dtype=np.float32),
"combined_scanned_pts": np.asarray(first_downsampled_target_point_cloud,dtype=np.float32),
"first_to_world_9d": np.asarray([first_to_world_9d],dtype=np.float32),
"scene_name": scene_name,
"max_coverage_rate": max_coverage_rate,
"voxel_threshold": voxel_threshold,
"filter_degree": self.filter_degree,
"O_to_L_pose": first_O_to_first_L_pose,
"first_frame_coverage": first_frame_coverage,
"scene_path": scene_path,
"model_points_normals": model_points_normals,
"best_seq_len": data_item_info["best_seq_len"],
"first_frame_id": first_frame_idx,
}
return data_item
def __len__(self):
return len(self.datalist)
def get_collate_fn(self):
def collate_fn(batch):
collate_data = {}
collate_data["first_pts"] = [torch.tensor(item['first_pts']) for item in batch]
collate_data["first_to_world_9d"] = [torch.tensor(item['first_to_world_9d']) for item in batch]
collate_data["combined_scanned_pts"] = torch.stack([torch.tensor(item['combined_scanned_pts']) for item in batch])
for key in batch[0].keys():
if key not in ["first_pts", "first_to_world_9d", "combined_scanned_pts"]:
collate_data[key] = [item[key] for item in batch]
return collate_data
return collate_fn
# -------------- Debug ---------------- #
if __name__ == "__main__":
import torch
seed = 0
torch.manual_seed(seed)
np.random.seed(seed)
config = {
"root_dir": "/home/data/hofee/project/nbv_rec/data/nbv_rec_data_512_preproc_npy",
"split_file": "/home/data/hofee/project/nbv_rec/data/OmniObject3d_train.txt",
"model_dir": "/home/data/hofee/project/nbv_rec/data/scaled_object_meshes",
"ratio": 0.005,
"batch_size": 2,
"filter_degree": 75,
"num_workers": 0,
"pts_num": 32684,
"type": namespace.Mode.TEST,
"load_from_preprocess": True
}
ds = SeqNBVReconstructionDataset(config)
print(len(ds))
#ds.__getitem__(10)
dl = ds.get_loader(shuffle=True)
for idx, data in enumerate(dl):
data = ds.process_batch(data, "cuda:0")
print(data)
# ------ Debug Start ------
import ipdb;ipdb.set_trace()
# ------ Debug End ------+

View File

@@ -75,8 +75,6 @@ class NBVReconstructionPipeline(nn.Module):
     def forward_test(self, data):
         main_feat = self.get_main_feat(data)
-        repeat_num = data.get("repeat_num", 1)
-        main_feat = main_feat.repeat(repeat_num, 1)
         estimated_delta_rot_9d, in_process_sample = self.view_finder.next_best_view(
             main_feat
         )
@@ -91,25 +89,49 @@ class NBVReconstructionPipeline(nn.Module):
             "scanned_n_to_world_pose_9d"
         ] # List(B): Tensor(S x 9)
+        scanned_pts_mask_batch = data["scanned_pts_mask"] # List(B): Tensor(N)

         device = next(self.parameters()).device

         embedding_list_batch = []

         combined_scanned_pts_batch = data["combined_scanned_pts"] # Tensor(B x N x 3)
-        global_scanned_feat = self.pts_encoder.encode_points(
-            combined_scanned_pts_batch, require_per_point_feat=False
-        ) # global_scanned_feat: Tensor(B x Dg)
-
-        for scanned_n_to_world_pose_9d in scanned_n_to_world_pose_9d_batch:
-            scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d.to(device) # Tensor(S x 9)
+        global_scanned_feat, per_point_feat_batch = self.pts_encoder.encode_points(
+            combined_scanned_pts_batch, require_per_point_feat=True
+        ) # global_scanned_feat: Tensor(B x Dg)
+        batch_size = len(scanned_n_to_world_pose_9d_batch)
+        for i in range(batch_size):
+            seq_len = len(scanned_n_to_world_pose_9d_batch[i])
+            scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d_batch[i].to(device) # Tensor(S x 9)
+            scanned_pts_mask = scanned_pts_mask_batch[i] # Tensor(S x N)
+            per_point_feat = per_point_feat_batch[i] # Tensor(N x Dp)
+            partial_point_feat_seq = []
+            for j in range(seq_len):
+                partial_per_point_feat = per_point_feat[scanned_pts_mask[j]]
+                if partial_per_point_feat.shape[0] == 0:
+                    partial_point_feat = torch.zeros(per_point_feat.shape[1], device=device)
+                else:
+                    partial_point_feat = torch.mean(partial_per_point_feat, dim=0) # Tensor(Dp)
+                partial_point_feat_seq.append(partial_point_feat)
+            partial_point_feat_seq = torch.stack(partial_point_feat_seq, dim=0) # Tensor(S x Dp)

             pose_feat_seq = self.pose_encoder.encode_pose(scanned_n_to_world_pose_9d) # Tensor(S x Dp)
-            seq_embedding = pose_feat_seq
+            seq_embedding = torch.cat([partial_point_feat_seq, pose_feat_seq], dim=-1)
             embedding_list_batch.append(seq_embedding) # List(B): Tensor(S x (Dp))

         seq_feat = self.seq_encoder.encode_sequence(embedding_list_batch) # Tensor(B x Ds)
         main_feat = torch.cat([seq_feat, global_scanned_feat], dim=-1) # Tensor(B x (Ds+Dg))

         if torch.isnan(main_feat).any():
+            for i in range(len(main_feat)):
+                if torch.isnan(main_feat[i]).any():
+                    scanned_pts_mask = scanned_pts_mask_batch[i]
+                    Log.info(f"scanned_pts_mask shape: {scanned_pts_mask.shape}")
+                    Log.info(f"scanned_pts_mask sum: {scanned_pts_mask.sum()}")
+                    import ipdb
+                    ipdb.set_trace()
             Log.error("nan in main_feat", True)

         return main_feat
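The change above replaces the pose-only per-view embedding with the concatenation [partial point feature, pose feature]. A minimal sketch of that masked mean-pooling step in isolation (dimensions are illustrative, not the configured ones):

import torch

def per_view_embeddings(per_point_feat, scanned_pts_mask, pose_feat_seq):
    # per_point_feat: (N, Dp) features of the combined downsampled cloud
    # scanned_pts_mask: (S, N) bool, True where view j covered point n
    # pose_feat_seq: (S, Dq) encoded camera poses
    pooled = []
    for j in range(scanned_pts_mask.shape[0]):
        part = per_point_feat[scanned_pts_mask[j]]
        # Empty views fall back to zeros, matching the guard above.
        pooled.append(part.mean(dim=0) if part.shape[0] > 0
                      else torch.zeros(per_point_feat.shape[1]))
    partial = torch.stack(pooled, dim=0)                # (S, Dp)
    return torch.cat([partial, pose_feat_seq], dim=-1)  # (S, Dp + Dq)

emb = per_view_embeddings(torch.randn(8192, 64),
                          torch.rand(4, 8192) > 0.5,
                          torch.randn(4, 256))
print(emb.shape)  # torch.Size([4, 320]) -- cf. embed_dim: 320 in the train config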

View File

@@ -1,209 +1,154 @@
 import numpy as np
 from PytorchBoot.dataset import BaseDataset
 import PytorchBoot.namespace as namespace
 import PytorchBoot.stereotype as stereotype
-from PytorchBoot.config import ConfigManager
 from PytorchBoot.utils.log_util import Log
 import torch
 import os
 import sys
-sys.path.append(r"/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction")
+sys.path.append(r"/home/data/hofee/project/nbv_rec/nbv_reconstruction")

 from utils.data_load import DataLoadUtil
 from utils.pose import PoseUtil
 from utils.pts import PtsUtil

-@stereotype.dataset("seq_reconstruction_dataset")
-class SeqReconstructionDataset(BaseDataset):
-    def __init__(self, config):
-        super(SeqReconstructionDataset, self).__init__(config)
-        self.config = config
-        self.root_dir = config["root_dir"]
-        self.split_file_path = config["split_file"]
-        self.scene_name_list = self.load_scene_name_list()
-        self.datalist = self.get_datalist()
-        self.pts_num = config["pts_num"]
-        self.type = config["type"]
-        self.cache = config.get("cache")
-        self.load_from_preprocess = config.get("load_from_preprocess", False)
-
-        if self.type == namespace.Mode.TEST:
-            #self.model_dir = config["model_dir"]
-            self.filter_degree = config["filter_degree"]
-        if self.type == namespace.Mode.TRAIN:
-            scale_ratio = 1
-            self.datalist = self.datalist*scale_ratio
-        if self.cache:
-            expr_root = ConfigManager.get("runner", "experiment", "root_dir")
-            expr_name = ConfigManager.get("runner", "experiment", "name")
-            self.cache_dir = os.path.join(expr_root, expr_name, "cache")
-            # self.preprocess_cache()
-
-    def load_scene_name_list(self):
-        scene_name_list = []
-        with open(self.split_file_path, "r") as f:
-            for line in f:
-                scene_name = line.strip()
-                if not os.path.exists(os.path.join(self.root_dir, scene_name)):
-                    continue
-                scene_name_list.append(scene_name)
-        return scene_name_list
-
-    def get_scene_name_list(self):
-        return self.scene_name_list
-
-    def get_datalist(self):
-        datalist = []
-        total = len(self.scene_name_list)
-        for idx, scene_name in enumerate(self.scene_name_list):
-            print(f"processing {scene_name} ({idx}/{total})")
-            scene_max_cr_idx = 0
-            frame_len = DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)
-
-            for i in range(10,frame_len):
-                path = DataLoadUtil.get_path(self.root_dir, scene_name, i)
-                pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
-                print(pts.shape)
-                if pts.shape[0] == 0:
-                    continue
-                else:
-                    break
-            print(i)
-            datalist.append({
-                "scene_name": scene_name,
-                "first_frame": i,
-                "best_seq_len": -1,
-                "max_coverage_rate": 1.0,
-                "label_idx": scene_max_cr_idx,
-            })
-        return datalist
-
-    def preprocess_cache(self):
-        Log.info("preprocessing cache...")
-        for item_idx in range(len(self.datalist)):
-            self.__getitem__(item_idx)
-        Log.success("finish preprocessing cache.")
-
-    def load_from_cache(self, scene_name, curr_frame_idx):
-        cache_name = f"{scene_name}_{curr_frame_idx}.txt"
-        cache_path = os.path.join(self.cache_dir, cache_name)
-        if os.path.exists(cache_path):
-            data = np.loadtxt(cache_path)
-            return data
-        else:
-            return None
-
-    def save_to_cache(self, scene_name, curr_frame_idx, data):
-        cache_name = f"{scene_name}_{curr_frame_idx}.txt"
-        cache_path = os.path.join(self.cache_dir, cache_name)
-        try:
-            np.savetxt(cache_path, data)
-        except Exception as e:
-            Log.error(f"Save cache failed: {e}")
-
-    def seq_combined_pts(self, scene, frame_idx_list):
-        all_combined_pts = []
-        for i in frame_idx_list:
-            path = DataLoadUtil.get_path(self.root_dir, scene, i)
-            pts = DataLoadUtil.load_from_preprocessed_pts(path,"npy")
-            if pts.shape[0] == 0:
-                continue
-            all_combined_pts.append(pts)
-        all_combined_pts = np.vstack(all_combined_pts)
-        downsampled_all_pts = PtsUtil.voxel_downsample_point_cloud(all_combined_pts, 0.003)
-        return downsampled_all_pts
-
-    def __getitem__(self, index):
-        data_item_info = self.datalist[index]
-        max_coverage_rate = data_item_info["max_coverage_rate"]
-        best_seq_len = data_item_info["best_seq_len"]
-        scene_name = data_item_info["scene_name"]
-        (
-            scanned_views_pts,
-            scanned_coverages_rate,
-            scanned_n_to_world_pose,
-        ) = ([], [], [])
-        view = data_item_info["first_frame"]
-        frame_idx = view
-        view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
-        cam_info = DataLoadUtil.load_cam_info(view_path, binocular=True)
-
-        n_to_world_pose = cam_info["cam_to_world"]
-        target_point_cloud = (
-            DataLoadUtil.load_from_preprocessed_pts(view_path)
-        )
-        downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(
-            target_point_cloud, self.pts_num
-        )
-        scanned_views_pts.append(downsampled_target_point_cloud)
-
-        n_to_world_6d = PoseUtil.matrix_to_rotation_6d_numpy(
-            np.asarray(n_to_world_pose[:3, :3])
-        )
-        first_left_cam_pose = cam_info["cam_to_world"]
-        first_center_cam_pose = cam_info["cam_to_world_O"]
-        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
-        n_to_world_trans = n_to_world_pose[:3, 3]
-        n_to_world_9d = np.concatenate([n_to_world_6d, n_to_world_trans], axis=0)
-        scanned_n_to_world_pose.append(n_to_world_9d)
-
-        frame_list = []
-        for i in range(DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)):
-            frame_list.append(i)
-        gt_pts = self.seq_combined_pts(scene_name, frame_list)
-        data_item = {
-            "first_scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32), # Ndarray(S x Nv x 3)
-            "first_scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32), # Ndarray(S x 9)
-            "seq_max_coverage_rate": max_coverage_rate, # Float, range(0, 1)
-            "best_seq_len": best_seq_len, # Int
-            "scene_name": scene_name, # String
-            "gt_pts": gt_pts, # Ndarray(N x 3)
-            "scene_path": os.path.join(self.root_dir, scene_name), # String
-            "O_to_L_pose": first_O_to_first_L_pose,
-        }
-        return data_item
-
-    def __len__(self):
-        return len(self.datalist)
-
-# -------------- Debug ---------------- #
-if __name__ == "__main__":
-    import torch
-    from tqdm import tqdm
-    import pickle
-    import os
-
-    seed = 0
-    torch.manual_seed(seed)
-    np.random.seed(seed)
-
-    config = {
-        "root_dir": "/media/hofee/data/data/test_bottle/view",
-        "source": "seq_reconstruction_dataset",
-        "split_file": "/media/hofee/data/data/test_bottle/test_bottle.txt",
-        "load_from_preprocess": True,
-        "filter_degree": 75,
-        "num_workers": 0,
-        "pts_num": 8192,
-        "type": namespace.Mode.TEST,
-    }
-
-    output_dir = "/media/hofee/data/data/test_bottle/preprocessed_dataset"
-    os.makedirs(output_dir, exist_ok=True)
-
-    ds = SeqReconstructionDataset(config)
-    for i in tqdm(range(len(ds)), desc="processing dataset"):
-        output_path = os.path.join(output_dir, f"item_{i}.pkl")
-        item = ds.__getitem__(i)
-        for key, value in item.items():
-            if isinstance(value, np.ndarray):
-                item[key] = value.tolist()
-        #import ipdb; ipdb.set_trace()
-        with open(output_path, "wb") as f:
-            pickle.dump(item, f)
+@stereotype.dataset("seq_nbv_reconstruction_dataset")
+class SeqNBVReconstructionDataset(BaseDataset):
+    def __init__(self, config):
+        super(SeqNBVReconstructionDataset, self).__init__(config)
+        self.type = config["type"]
+        if self.type != namespace.Mode.TEST:
+            Log.error("Dataset <seq_nbv_reconstruction_dataset> Only support test mode", terminate=True)
+        self.config = config
+        self.root_dir = config["root_dir"]
+        self.split_file_path = config["split_file"]
+        self.scene_name_list = self.load_scene_name_list()
+        self.datalist = self.get_datalist()
+        self.pts_num = config["pts_num"]
+        self.model_dir = config["model_dir"]
+        self.filter_degree = config["filter_degree"]
+        self.load_from_preprocess = config.get("load_from_preprocess", False)
+
+    def load_scene_name_list(self):
+        scene_name_list = []
+        with open(self.split_file_path, "r") as f:
+            for line in f:
+                scene_name = line.strip()
+                scene_name_list.append(scene_name)
+        return scene_name_list
+
+    def get_datalist(self):
+        datalist = []
+        for scene_name in self.scene_name_list:
+            seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
+            scene_max_coverage_rate = 0
+            scene_max_cr_idx = 0
+
+            for seq_idx in range(seq_num):
+                label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, seq_idx)
+                label_data = DataLoadUtil.load_label(label_path)
+                max_coverage_rate = label_data["max_coverage_rate"]
+                if max_coverage_rate > scene_max_coverage_rate:
+                    scene_max_coverage_rate = max_coverage_rate
+                    scene_max_cr_idx = seq_idx
+
+            label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, scene_max_cr_idx)
+            label_data = DataLoadUtil.load_label(label_path)
+            first_frame = label_data["best_sequence"][0]
+            best_seq_len = len(label_data["best_sequence"])
+            datalist.append({
+                "scene_name": scene_name,
+                "first_frame": first_frame,
+                "max_coverage_rate": scene_max_coverage_rate,
+                "best_seq_len": best_seq_len,
+                "label_idx": scene_max_cr_idx,
+            })
+        return datalist
+
+    def __getitem__(self, index):
+        data_item_info = self.datalist[index]
+        first_frame_idx = data_item_info["first_frame"][0]
+        first_frame_coverage = data_item_info["first_frame"][1]
+        max_coverage_rate = data_item_info["max_coverage_rate"]
+        scene_name = data_item_info["scene_name"]
+        first_cam_info = DataLoadUtil.load_cam_info(DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx), binocular=True)
+        first_view_path = DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx)
+        first_left_cam_pose = first_cam_info["cam_to_world"]
+        first_center_cam_pose = first_cam_info["cam_to_world_O"]
+        first_target_point_cloud = DataLoadUtil.load_from_preprocessed_pts(first_view_path)
+        first_pts_num = first_target_point_cloud.shape[0]
+        first_downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(first_target_point_cloud, self.pts_num)
+        first_to_world_rot_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(first_left_cam_pose[:3,:3]))
+        first_to_world_trans = first_left_cam_pose[:3,3]
+        first_to_world_9d = np.concatenate([first_to_world_rot_6d, first_to_world_trans], axis=0)
+        diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
+        voxel_threshold = diag*0.02
+        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
+        scene_path = os.path.join(self.root_dir, scene_name)
+        model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
+
+        data_item = {
+            "first_pts_num": np.asarray(
+                first_pts_num, dtype=np.int32
+            ),
+            "first_pts": np.asarray([first_downsampled_target_point_cloud],dtype=np.float32),
+            "combined_scanned_pts": np.asarray(first_downsampled_target_point_cloud,dtype=np.float32),
+            "first_to_world_9d": np.asarray([first_to_world_9d],dtype=np.float32),
+            "scene_name": scene_name,
+            "max_coverage_rate": max_coverage_rate,
+            "voxel_threshold": voxel_threshold,
+            "filter_degree": self.filter_degree,
+            "O_to_L_pose": first_O_to_first_L_pose,
+            "first_frame_coverage": first_frame_coverage,
+            "scene_path": scene_path,
+            "model_points_normals": model_points_normals,
+            "best_seq_len": data_item_info["best_seq_len"],
+            "first_frame_id": first_frame_idx,
+        }
+        return data_item
+
+    def __len__(self):
+        return len(self.datalist)
+
+    def get_collate_fn(self):
+        def collate_fn(batch):
+            collate_data = {}
+            collate_data["first_pts"] = [torch.tensor(item['first_pts']) for item in batch]
+            collate_data["first_to_world_9d"] = [torch.tensor(item['first_to_world_9d']) for item in batch]
+            collate_data["combined_scanned_pts"] = torch.stack([torch.tensor(item['combined_scanned_pts']) for item in batch])
+            for key in batch[0].keys():
+                if key not in ["first_pts", "first_to_world_9d", "combined_scanned_pts"]:
+                    collate_data[key] = [item[key] for item in batch]
+            return collate_data
+        return collate_fn
+
+# -------------- Debug ---------------- #
+if __name__ == "__main__":
+    import torch
+    seed = 0
+    torch.manual_seed(seed)
+    np.random.seed(seed)
+    config = {
+        "root_dir": "/home/data/hofee/project/nbv_rec/data/nbv_rec_data_512_preproc_npy",
+        "split_file": "/home/data/hofee/project/nbv_rec/data/OmniObject3d_train.txt",
+        "model_dir": "/home/data/hofee/project/nbv_rec/data/scaled_object_meshes",
+        "ratio": 0.005,
+        "batch_size": 2,
+        "filter_degree": 75,
+        "num_workers": 0,
+        "pts_num": 32684,
+        "type": namespace.Mode.TEST,
+        "load_from_preprocess": True
+    }
+    ds = SeqNBVReconstructionDataset(config)
+    print(len(ds))
+    #ds.__getitem__(10)
+    dl = ds.get_loader(shuffle=True)
+    for idx, data in enumerate(dl):
+        data = ds.process_batch(data, "cuda:0")
+        print(data)
+        # ------ Debug Start ------
+        import ipdb;ipdb.set_trace()
+        # ------ Debug End ------
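Both dataset variants serialize camera poses as 9d vectors: a 6d rotation (the top two rows of the rotation matrix) followed by the translation. A round-trip sketch of that encoding, assuming PoseUtil.matrix_to_rotation_6d_numpy uses the same row convention as PredictResult earlier in this changeset:

import numpy as np

def matrix_to_9d(pose_4x4):
    rot6 = pose_4x4[:3, :3][:2, :].reshape(6)  # top two rows, flattened
    return np.concatenate([rot6, pose_4x4[:3, 3]])

def nine_d_to_matrix(p9):
    # Gram-Schmidt the two stored rows, complete with the cross product.
    a1, a2 = p9[:3], p9[3:6]
    b1 = a1 / np.linalg.norm(a1)
    b2 = a2 - np.dot(b1, a2) * b1
    b2 = b2 / np.linalg.norm(b2)
    b3 = np.cross(b1, b2)
    mat = np.eye(4)
    mat[:3, :3] = np.stack((b1, b2, b3))
    mat[:3, 3] = p9[6:]
    return mat

pose = np.eye(4)
pose[:3, 3] = [0.1, 0.2, 0.3]
assert np.allclose(nine_d_to_matrix(matrix_to_9d(pose)), pose)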

View File

@@ -1,82 +0,0 @@
import numpy as np
from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.config import ConfigManager
from PytorchBoot.utils.log_util import Log
import pickle
import torch
import os
import sys
sys.path.append(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction")
from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil
@stereotype.dataset("seq_reconstruction_dataset_preprocessed")
class SeqReconstructionDatasetPreprocessed(BaseDataset):
def __init__(self, config):
super(SeqReconstructionDatasetPreprocessed, self).__init__(config)
self.config = config
self.root_dir = config["root_dir"]
self.real_root_dir = r"/media/hofee/data/data/new_testset"
self.item_list = os.listdir(self.root_dir)
def __getitem__(self, index):
data = pickle.load(open(os.path.join(self.root_dir, self.item_list[index]), "rb"))
data_item = {
"first_scanned_pts": np.asarray(data["first_scanned_pts"], dtype=np.float32), # Ndarray(S x Nv x 3)
"first_scanned_n_to_world_pose_9d": np.asarray(data["first_scanned_n_to_world_pose_9d"], dtype=np.float32), # Ndarray(S x 9)
"seq_max_coverage_rate": data["seq_max_coverage_rate"], # Float, range(0, 1)
"best_seq_len": data["best_seq_len"], # Int
"scene_name": data["scene_name"], # String
"gt_pts": np.asarray(data["gt_pts"], dtype=np.float32), # Ndarray(N x 3)
"scene_path": os.path.join(self.real_root_dir, data["scene_name"]), # String
"O_to_L_pose": np.asarray(data["O_to_L_pose"], dtype=np.float32),
}
return data_item
def __len__(self):
return len(self.item_list)
# -------------- Debug ---------------- #
if __name__ == "__main__":
import torch
seed = 0
torch.manual_seed(seed)
np.random.seed(seed)
'''
OmniObject3d_test:
root_dir: "H:\\AI\\Datasets\\packed_test_data"
model_dir: "H:\\AI\\Datasets\\scaled_object_meshes"
source: seq_reconstruction_dataset
split_file: "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt"
type: test
filter_degree: 75
eval_list:
- pose_diff
- coverage_rate_increase
ratio: 0.1
batch_size: 1
num_workers: 12
pts_num: 8192
load_from_preprocess: True
'''
config = {
"root_dir": "/media/hofee/data/data/test_bottle/preprocessed_dataset",
"source": "seq_reconstruction_dataset",
"split_file": "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt",
"load_from_preprocess": True,
"ratio": 1,
"filter_degree": 75,
"num_workers": 0,
"pts_num": 8192,
"type": "test",
}
ds = SeqReconstructionDataset(config)
print(len(ds))
print(ds.__getitem__(10))
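The preprocessed dataset this class reads is just a directory of pickled dicts, with ndarrays stored as plain lists. A minimal write/read sketch in that format (path and keys illustrative):

import os
import pickle
import numpy as np

output_dir = "/tmp/preprocessed_dataset_demo"  # illustrative path
os.makedirs(output_dir, exist_ok=True)

# Writing mirrors the seq_reconstruction_dataset debug main above:
# ndarrays are converted with .tolist() before pickling.
item = {"first_scanned_pts": np.zeros((1, 8192, 3)).tolist(), "scene_name": "demo"}
with open(os.path.join(output_dir, "item_0.pkl"), "wb") as f:
    pickle.dump(item, f)

# Reading mirrors SeqReconstructionDatasetPreprocessed.__getitem__.
with open(os.path.join(output_dir, "item_0.pkl"), "rb") as f:
    data = pickle.load(f)
print(np.asarray(data["first_scanned_pts"], dtype=np.float32).shape)  # (1, 8192, 3)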

View File

@@ -1,162 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
from . import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
self.pool_method = 'max_pool'
def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
"""
:param xyz: (B, N, 3) tensor of the xyz coordinates of the features
:param features: (B, N, C) tensor of the descriptors of the features
:param new_xyz:
:return:
new_xyz: (B, npoint, 3) tensor of the new features' xyz
new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if new_xyz is None:
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](xyz, new_xyz, features) # (B, C, npoint, nsample)
new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
else:
raise NotImplementedError
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
"""Pointnet set abstraction layer with multiscale grouping"""
def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True,
use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
"""
:param npoint: int
:param radii: list of float, list of radii to group with
:param nsamples: list of int, number of samples in each ball query
:param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
:param bn: whether to use batchnorm
:param use_xyz:
:param pool_method: max_pool / avg_pool
:param instance_norm: whether to use instance_norm
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm))
self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
"""Pointnet set abstraction layer"""
def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None,
bn: bool = True, use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
"""
:param mlp: list of int, spec of the pointnet before the global max_pool
:param npoint: int, number of features
:param radius: float, radius of ball
:param nsample: int, number of samples in the ball query
:param bn: whether to use batchnorm
:param use_xyz:
:param pool_method: max_pool / avg_pool
:param instance_norm: whether to use instance_norm
"""
super().__init__(
mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz,
pool_method=pool_method, instance_norm=instance_norm
)
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another"""
def __init__(self, *, mlp: List[int], bn: bool = True):
"""
:param mlp: list of int
:param bn: whether to use batchnorm
"""
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
"""
:param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
:param known: (B, m, 3) tensor of the xyz positions of the known features
:param unknow_feats: (B, C1, n) tensor of the features to be propagated to
:param known_feats: (B, C2, m) tensor of features to be propagated
:return:
new_features: (B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
else:
interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats], dim=1) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
if __name__ == "__main__":
pass
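For readers unfamiliar with the shapes involved: the pooling step in _PointnetSAModuleBase.forward collapses the nsample axis of the grouped features. A standalone shape walk-through with random tensors in place of the CUDA grouping ops:

import torch
import torch.nn.functional as F

B, C, npoint, nsample = 2, 64, 128, 16
new_features = torch.randn(B, C, npoint, nsample)  # output of a grouper + MLP
pooled = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
print(pooled.shape)              # torch.Size([2, 64, 128, 1])
print(pooled.squeeze(-1).shape)  # torch.Size([2, 64, 128])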

View File

@@ -1,291 +0,0 @@
import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
import sys
import pointnet2_cuda as pointnet2
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
:param ctx:
:param xyz: (B, N, 3) where N > npoint
:param npoint: int, number of features in the sampled set
:return:
output: (B, npoint) tensor containing the set
"""
assert xyz.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
return output
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N)
:param idx: (B, npoint) index tensor of the features to gather
:return:
output: (B, C, npoint)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
B, npoint = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, npoint)
pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
ctx.for_backwards = (idx, C, N)
return output
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
B, npoint = idx.size()
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
:param ctx:
:param unknown: (B, N, 3)
:param known: (B, M, 3)
:return:
dist: (B, N, 3) l2 distance to the three nearest neighbors
idx: (B, N, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
B, N, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(B, N, 3)
idx = torch.cuda.IntTensor(B, N, 3)
pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
Performs weighted linear interpolation on 3 features
:param ctx:
:param features: (B, C, M) Features descriptors to be interpolated from
:param idx: (B, n, 3) three nearest neighbors of the target features in features
:param weight: (B, n, 3) weights
:return:
output: (B, C, N) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(B, c, n)
pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, N) tensor with gradients of outputs
:return:
grad_features: (B, C, M) tensor with gradients of features
None:
None:
"""
idx, weight, m = ctx.three_interpolate_for_backward
B, c, n = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
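In PointNet++-style feature propagation, three_nn and three_interpolate are typically combined with normalized inverse-distance weights; a sketch continuing the tensors from the previous example:

    features = torch.rand(2, 64, 512, device="cuda").contiguous()     # (B, C, M), defined on the known points
    dist, idx = three_nn(unknown, known)
    dist_recip = 1.0 / (dist + 1e-8)
    weight = dist_recip / torch.sum(dist_recip, dim=2, keepdim=True)  # (B, n, 3), rows sum to 1
    interpolated = three_interpolate(features, idx, weight)           # (B, 64, 128)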
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indices of features to group with
:return:
output: (B, C, npoint, nsample) tensor
"""
assert features.is_contiguous()
assert idx.is_contiguous()
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)
ctx.for_backwards = (idx, N)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
:return:
grad_features: (B, C, N) gradient of the features
"""
idx, N = ctx.for_backwards
B, C, npoint, nsample = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping_operation = GroupingOperation.apply
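Sketch (same assumptions): gather a fixed number of neighbor features per group center; the index tensor must be int32 and contiguous:

    features = torch.rand(2, 32, 1024, device="cuda").contiguous()                 # (B, C, N)
    idx = torch.randint(0, 1024, (2, 256, 16), device="cuda", dtype=torch.int32)   # (B, npoint, nsample)
    grouped = grouping_operation(features, idx)                                    # (B, 32, 256, 16)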
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param radius: float, radius of the balls
:param nsample: int, maximum number of features in the balls
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centers of the ball query
:return:
            idx: (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
assert new_xyz.is_contiguous()
assert xyz.is_contiguous()
B, N, _ = xyz.size()
npoint = new_xyz.size(1)
idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
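Sketch (same assumptions): indices of up to nsample neighbors within a fixed radius around each center, which feed directly into grouping_operation above:

    xyz = torch.rand(2, 1024, 3, device="cuda").contiguous()
    new_xyz = xyz[:, :256, :].contiguous()      # centers, e.g. taken from FPS
    idx = ball_query(0.08, 32, xyz, new_xyz)    # (B, 256, 32) int32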
class QueryAndGroup(nn.Module):
def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
"""
:param radius: float, radius of ball
:param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: bool, whether to concatenate the grouped xyz coordinates to the grouped features
"""
super().__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> torch.Tensor:
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centroids
:param features: (B, C, N) descriptors of the features
:return:
new_features: (B, 3 + C, npoint, nsample)
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "use_xyz must be True when no features are provided!"
new_features = grouped_xyz
return new_features
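Putting the pieces together, a usage sketch of QueryAndGroup under the same assumptions (illustrative shapes):

    grouper = QueryAndGroup(radius=0.08, nsample=32, use_xyz=True)
    xyz = torch.rand(2, 1024, 3, device="cuda").contiguous()          # (B, N, 3)
    new_xyz = xyz[:, :256, :].contiguous()                            # (B, npoint, 3)
    features = torch.rand(2, 64, 1024, device="cuda").contiguous()    # (B, C, N)
    new_features = grouper(xyz, new_xyz, features)                    # (B, 3 + 64, 256, 32)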
class GroupAll(nn.Module):
def __init__(self, use_xyz: bool = True):
super().__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: ignored
:param features: (B, C, N) descriptors of the features
:return:
new_features: (B, C + 3, 1, N)
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
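GroupAll is the degenerate grouping used by the final SA level (npoint=None): the whole cloud becomes a single group. A sketch under the same assumptions:

    grouper = GroupAll(use_xyz=True)
    xyz = torch.rand(2, 1024, 3, device="cuda")
    features = torch.rand(2, 64, 1024, device="cuda")
    new_features = grouper(xyz, None, features)   # (B, 3 + 64, 1, 1024)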

View File

@@ -1,236 +0,0 @@
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = "",
instance_norm: bool = False,
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact,
instance_norm=instance_norm
)
)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name="",
instance_norm=False,
instance_norm_func=None
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if instance_norm:
if not preact:
in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)
else:
in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
if not bn and instance_norm:
self.add_module(name + 'in', in_unit)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
if not bn and instance_norm:
self.add_module(name + 'in', in_unit)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = "",
instance_norm=False
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name,
instance_norm=instance_norm,
instance_norm_func=nn.InstanceNorm1d
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = "",
instance_norm=False
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name,
instance_norm=instance_norm,
instance_norm_func=nn.InstanceNorm2d
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
            nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
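For reference, a sketch (not from the original file) of how SharedMLP expands its args list into stacked 1x1 Conv2d blocks; this one runs on CPU and the shapes are illustrative:

    import torch
    mlp = SharedMLP([3, 32, 64], bn=True)  # two pointwise conv blocks: 3 -> 32 -> 64
    x = torch.rand(4, 3, 128, 16)          # (B, C_in, npoint, nsample)
    y = mlp(x)                             # (B, 64, 128, 16)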

View File

@@ -1,148 +0,0 @@
import torch
import torch.nn as nn
import os
import sys
path = os.path.abspath(__file__)
for i in range(2):
path = os.path.dirname(path)
PROJECT_ROOT = path
sys.path.append(PROJECT_ROOT)
import PytorchBoot.stereotype as stereotype
from modules.module_lib.pointnet2_modules import PointnetSAModuleMSG
ClsMSG_CFG_Dense = {
'NPOINTS': [512, 256, 128, None],
'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
'NSAMPLE': [[32, 64], [16, 32], [8, 16], [None, None]],
'MLPS': [[[16, 16, 32], [32, 32, 64]],
[[64, 64, 128], [64, 96, 128]],
[[128, 196, 256], [128, 196, 256]],
[[256, 256, 512], [256, 384, 512]]],
'DP_RATIO': 0.5,
}
ClsMSG_CFG_Light = {
'NPOINTS': [512, 256, 128, None],
'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
'NSAMPLE': [[16, 32], [16, 32], [16, 32], [None, None]],
'MLPS': [[[16, 16, 32], [32, 32, 64]],
[[64, 64, 128], [64, 96, 128]],
[[128, 196, 256], [128, 196, 256]],
[[256, 256, 512], [256, 384, 512]]],
'DP_RATIO': 0.5,
}
ClsMSG_CFG_Light_2048 = {
'NPOINTS': [512, 256, 128, None],
'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
'NSAMPLE': [[16, 32], [16, 32], [16, 32], [None, None]],
'MLPS': [[[16, 16, 32], [32, 32, 64]],
[[64, 64, 128], [64, 96, 128]],
[[128, 196, 256], [128, 196, 256]],
[[256, 256, 1024], [256, 512, 1024]]],
'DP_RATIO': 0.5,
}
ClsMSG_CFG_Strong = {
    'NPOINTS': [1024, 512, 256, 128, None],  # more sampled points to capture finer detail
    'RADIUS': [[0.02, 0.05], [0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [None, None]],  # larger receptive fields
    'NSAMPLE': [[32, 64], [32, 64], [32, 64], [32, 64], [None, None]],  # more neighbors sampled per layer
    'MLPS': [[[32, 32, 64], [64, 64, 128]],  # wider MLPs for stronger feature extraction
             [[128, 128, 256], [128, 128, 256]],
             [[256, 256, 512], [256, 384, 512]],
             [[512, 512, 1024], [512, 768, 1024]],
             [[1024, 1024, 2048], [1024, 1024, 2048]]],  # deeper feature hierarchy
    'DP_RATIO': 0.4,  # slightly lower dropout ratio to keep more information
}
ClsMSG_CFG_Lighter = {
'NPOINTS': [512, 256, 128, 64, None],
'RADIUS': [[0.01], [0.02], [0.04], [0.08], [None]],
'NSAMPLE': [[64], [32], [16], [8], [None]],
'MLPS': [[[32, 32, 64]],
[[64, 64, 128]],
[[128, 196, 256]],
[[256, 256, 512]],
[[512, 512, 1024]]],
'DP_RATIO': 0.5,
}
def select_params(name):
if name == 'light':
return ClsMSG_CFG_Light
elif name == 'lighter':
return ClsMSG_CFG_Lighter
elif name == 'dense':
return ClsMSG_CFG_Dense
elif name == 'light_2048':
return ClsMSG_CFG_Light_2048
elif name == 'strong':
return ClsMSG_CFG_Strong
else:
raise NotImplementedError
def break_up_pc(pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
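A small sketch of break_up_pc (illustrative shapes): a (B, N, 3 + C) cloud splits into coordinates and channel-first features, and xyz-only clouds yield features=None:

    pc = torch.rand(2, 1024, 6)
    xyz, features = break_up_pc(pc)   # xyz: (2, 1024, 3), features: (2, 3, 1024)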
@stereotype.module("pointnet++_encoder")
class PointNet2Encoder(nn.Module):
def encode_points(self, pts, require_per_point_feat=False):
return self.forward(pts)
def __init__(self, config:dict):
super().__init__()
channel_in = config.get("in_dim", 3) - 3
params_name = config.get("params_name", "light")
self.SA_modules = nn.ModuleList()
selected_params = select_params(params_name)
        for k in range(len(selected_params['NPOINTS'])):
mlps = selected_params['MLPS'][k].copy()
channel_out = 0
            for idx in range(len(mlps)):
mlps[idx] = [channel_in] + mlps[idx]
channel_out += mlps[idx][-1]
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=selected_params['NPOINTS'][k],
radii=selected_params['RADIUS'][k],
nsamples=selected_params['NSAMPLE'][k],
mlps=mlps,
use_xyz=True,
bn=True
)
)
channel_in = channel_out
def forward(self, point_cloud: torch.cuda.FloatTensor):
xyz, features = break_up_pc(point_cloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
return l_features[-1].squeeze(-1)
if __name__ == '__main__':
seed = 100
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
net = PointNet2Encoder(config={"in_dim": 3, "params_name": "strong"}).cuda()
pts = torch.randn(2, 2444, 3).cuda()
print(torch.mean(pts, dim=1))
pre = net.encode_points(pts)
print(pre.shape)

View File

@@ -164,10 +164,10 @@ def save_scene_data(root, scene, scene_idx=0, scene_total=1,file_type="txt"):
 if __name__ == "__main__":
     #root = "/media/hofee/repository/new_data_with_normal"
-    root = r"/media/hofee/data/data/test_bottle/view"
+    root = r"H:\AI\Datasets\nbv_rec_part2"
     scene_list = os.listdir(root)
     from_idx = 0 # 1000
-    to_idx = len(scene_list) # 1500
+    to_idx = 600 # 1500
     cnt = 0

View File

@@ -12,9 +12,8 @@ from PytorchBoot.runners.runner import Runner
 from PytorchBoot.utils import Log
 from utils.pts import PtsUtil
-from beans.predict_result import PredictResult
-@stereotype.runner("inferencer_server")
+@stereotype.runner("inferencer")
 class InferencerServer(Runner):
     def __init__(self, config_path):
         super().__init__(config_path)
@@ -25,45 +24,40 @@ class InferencerServer(Runner):
         self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
         self.pipeline = self.pipeline.to(self.device)
-        self.pts_num = 8192
-        self.voxel_size = 0.002
         ''' Experiment '''
-        self.load_experiment("inferencer_server")
+        self.load_experiment("nbv_evaluator")
     def get_input_data(self, data):
         input_data = {}
         scanned_pts = data["scanned_pts"]
         scanned_n_to_world_pose_9d = data["scanned_n_to_world_pose_9d"]
         combined_scanned_views_pts = np.concatenate(scanned_pts, axis=0)
-        voxel_downsampled_combined_scanned_pts = PtsUtil.voxel_downsample_point_cloud(
-            combined_scanned_views_pts, self.voxel_size
-        )
         fps_downsampled_combined_scanned_pts, fps_idx = PtsUtil.fps_downsample_point_cloud(
-            voxel_downsampled_combined_scanned_pts, self.pts_num, require_idx=True
+            combined_scanned_views_pts, self.pts_num, require_idx=True
         )
-        input_data["scanned_pts"] = scanned_pts
+        combined_scanned_views_pts_mask = np.zeros(len(scanned_pts), dtype=np.uint8)
+        start_idx = 0
+        for i in range(len(scanned_pts)):
+            end_idx = start_idx + len(scanned_pts[i])
+            combined_scanned_views_pts_mask[start_idx:end_idx] = i
+            start_idx = end_idx
+        fps_downsampled_combined_scanned_pts_mask = combined_scanned_views_pts_mask[fps_idx]
+        input_data["scanned_pts_mask"] = np.asarray(fps_downsampled_combined_scanned_pts_mask, dtype=np.uint8)
         input_data["scanned_n_to_world_pose_9d"] = np.asarray(scanned_n_to_world_pose_9d, dtype=np.float32)
         input_data["combined_scanned_pts"] = np.asarray(fps_downsampled_combined_scanned_pts, dtype=np.float32)
         return input_data
     def get_result(self, output_data):
-        pred_pose_9d = output_data["pred_pose_9d"]
-        pred_pose_9d = np.asarray(PredictResult(pred_pose_9d.cpu().numpy(), None, cluster_params=dict(eps=0.25, min_samples=3)).candidate_9d_poses, dtype=np.float32)
+        estimated_delta_rot_9d = output_data["pred_pose_9d"]
         result = {
-            "pred_pose_9d": pred_pose_9d.tolist()
+            "estimated_delta_rot_9d": estimated_delta_rot_9d.tolist()
         }
         return result
-    def collate_input(self, input_data):
-        collated_input_data = {}
-        collated_input_data["scanned_pts"] = [torch.tensor(input_data["scanned_pts"], dtype=torch.float32, device=self.device)]
-        collated_input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(input_data["scanned_n_to_world_pose_9d"], dtype=torch.float32, device=self.device)]
-        collated_input_data["combined_scanned_pts"] = torch.tensor(input_data["combined_scanned_pts"], dtype=torch.float32, device=self.device).unsqueeze(0)
-        return collated_input_data
     def run(self):
         Log.info("Loading from epoch {}.".format(self.current_epoch))
@@ -71,8 +65,7 @@ class InferencerServer(Runner):
         def inference():
             data = request.json
             input_data = self.get_input_data(data)
-            collated_input_data = self.collate_input(input_data)
-            output_data = self.pipeline.forward_test(collated_input_data)
+            output_data = self.pipeline.forward_test(input_data)
             result = self.get_result(output_data)
             return jsonify(result)

View File

@@ -4,7 +4,6 @@ from utils.render import RenderUtil
 from utils.pose import PoseUtil
 from utils.pts import PtsUtil
 from utils.reconstruction import ReconstructionUtil
-from beans.predict_result import PredictResult
 import torch
 from tqdm import tqdm
@@ -20,19 +19,14 @@ from PytorchBoot.dataset import BaseDataset
 from PytorchBoot.runners.runner import Runner
 from PytorchBoot.utils import Log
 from PytorchBoot.status import status_manager
-from utils.data_load import DataLoadUtil
 @stereotype.runner("inferencer")
 class Inferencer(Runner):
     def __init__(self, config_path):
         super().__init__(config_path)
         self.script_path = ConfigManager.get(namespace.Stereotype.RUNNER, "blender_script_path")
         self.output_dir = ConfigManager.get(namespace.Stereotype.RUNNER, "output_dir")
-        self.voxel_size = ConfigManager.get(namespace.Stereotype.RUNNER, "voxel_size")
-        self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
-        CM = 0.01
-        self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) **2
         ''' Pipeline '''
         self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
@@ -40,12 +34,7 @@ class Inferencer(Runner):
         ''' Experiment '''
         self.load_experiment("nbv_evaluator")
-        self.stat_result_path = os.path.join(self.output_dir, "stat.json")
-        if os.path.exists(self.stat_result_path):
-            with open(self.stat_result_path, "r") as f:
-                self.stat_result = json.load(f)
-        else:
-            self.stat_result = {}
+        self.stat_result = {}
         ''' Test '''
         self.test_config = ConfigManager.get(namespace.Stereotype.RUNNER, namespace.Mode.TEST)
@@ -76,182 +65,128 @@ class Inferencer(Runner):
         for dataset_idx, test_set in enumerate(self.test_set_list):
             status_manager.set_progress("inference", "inferencer", f"dataset", dataset_idx, len(self.test_set_list))
             test_set_name = test_set.get_name()
-            total=int(len(test_set))
-            for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
-                try:
-                    data = test_set.__getitem__(i)
-                    scene_name = data["scene_name"]
-                    inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
-                    if os.path.exists(inference_result_path):
-                        Log.info(f"Inference result already exists for scene: {scene_name}")
-                        continue
-                    status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
-                    output = self.predict_sequence(data)
-                    self.save_inference_result(test_set_name, data["scene_name"], output)
-                except Exception as e:
-                    Log.error(f"Error in scene {scene_name}, {e}")
-                    continue
+            test_loader = test_set.get_loader()
+            if test_loader.batch_size > 1:
+                Log.error("Batch size should be 1 for inference, found {} in {}".format(test_loader.batch_size, test_set_name), terminate=True)
+            total=int(len(test_loader))
+            loop = tqdm(enumerate(test_loader), total=total)
+            for i, data in loop:
+                status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
+                test_set.process_batch(data, self.device)
+                output = self.predict_sequence(data)
+                self.save_inference_result(test_set_name, data["scene_name"][0], output)
         status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))
-    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 10, max_success=3):
-        scene_name = data["scene_name"]
+    def predict_sequence(self, data, cr_increase_threshold=0, max_iter=50, max_retry=5):
+        scene_name = data["scene_name"][0]
         Log.info(f"Processing scene: {scene_name}")
         status_manager.set_status("inference", "inferencer", "scene", scene_name)
         ''' data for rendering '''
-        scene_path = data["scene_path"]
-        O_to_L_pose = data["O_to_L_pose"]
-        voxel_threshold = self.voxel_size
-        filter_degree = 75
-        down_sampled_model_pts = data["gt_pts"]
-        first_frame_to_world_9d = data["first_scanned_n_to_world_pose_9d"][0]
-        first_frame_to_world = np.eye(4)
-        first_frame_to_world[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(first_frame_to_world_9d[:6])
-        first_frame_to_world[:3,3] = first_frame_to_world_9d[6:]
+        scene_path = data["scene_path"][0]
+        O_to_L_pose = data["O_to_L_pose"][0]
+        voxel_threshold = data["voxel_threshold"][0]
+        filter_degree = data["filter_degree"][0]
+        model_points_normals = data["model_points_normals"][0]
+        model_pts = model_points_normals[:,:3]
+        down_sampled_model_pts = PtsUtil.voxel_downsample_point_cloud(model_pts, voxel_threshold)
+        first_frame_to_world_9d = data["first_to_world_9d"][0]
+        first_frame_to_world = torch.eye(4, device=first_frame_to_world_9d.device)
+        first_frame_to_world[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(first_frame_to_world_9d[:,:6])[0]
+        first_frame_to_world[:3,3] = first_frame_to_world_9d[0,6:]
+        first_frame_to_world = first_frame_to_world.to(self.device)
         ''' data for inference '''
         input_data = {}
-        input_data["combined_scanned_pts"] = torch.tensor(data["first_scanned_pts"][0], dtype=torch.float32).to(self.device).unsqueeze(0)
-        input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)]
+        input_data["scanned_pts"] = [data["first_pts"][0].to(self.device)]
+        input_data["scanned_n_to_world_pose_9d"] = [data["first_to_world_9d"][0].to(self.device)]
         input_data["mode"] = namespace.Mode.TEST
-        input_pts_N = input_data["combined_scanned_pts"].shape[1]
-        root = os.path.dirname(scene_path)
-        display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
-        radius = display_table_info["radius"]
-        scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0,display_table_radius=radius))
-        first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
+        input_data["combined_scanned_pts"] = data["combined_scanned_pts"]
+        input_pts_N = input_data["scanned_pts"][0].shape[1]
+        first_frame_target_pts, _ = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, model_points_normals, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
         scanned_view_pts = [first_frame_target_pts]
-        history_indices = [first_frame_scan_points_indices]
-        last_pred_cr, added_pts_num = self.compute_coverage_rate(scanned_view_pts, None, down_sampled_model_pts, threshold=voxel_threshold)
+        last_pred_cr = self.compute_coverage_rate(scanned_view_pts, None, down_sampled_model_pts, threshold=voxel_threshold)
         retry_duplication_pose = []
         retry_no_pts_pose = []
-        retry_overlap_pose = []
         retry = 0
         pred_cr_seq = [last_pred_cr]
-        success = 0
-        last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], voxel_threshold).shape[0]
-        import time
-        while len(pred_cr_seq) < max_iter and retry < max_retry and success < max_success:
-            Log.green(f"iter: {len(pred_cr_seq)}, retry: {retry}/{max_retry}, success: {success}/{max_success}")
-            combined_scanned_pts = np.vstack(scanned_view_pts)
-            voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
+        while len(pred_cr_seq) < max_iter and retry < max_retry:
             output = self.pipeline(input_data)
             pred_pose_9d = output["pred_pose_9d"]
             pred_pose = torch.eye(4, device=pred_pose_9d.device)
-            # # save pred_pose_9d ------
-            # root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result"
-            # scene_dir = os.path.join(root, scene_name)
-            # if not os.path.exists(scene_dir):
-            #     os.makedirs(scene_dir)
-            # pred_9d_path = os.path.join(scene_dir,f"pred_pose_9d_{len(pred_cr_seq)}.npy")
-            # pts_path = os.path.join(scene_dir,f"combined_scanned_pts_{len(pred_cr_seq)}.txt")
-            # np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy()
-            # np.save(pred_9d_path, pred_pose_9d.cpu().numpy())
-            # np.savetxt(pts_path, np_combined_scanned_pts)
-            # # ----- ----- -----
-            predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
-            # -----------------------
-            # import ipdb; ipdb.set_trace()
-            # predict_result.visualize()
-            # -----------------------
-            pred_pose_9d_candidates = predict_result.candidate_9d_poses
-            for pred_pose_9d in pred_pose_9d_candidates:
-                #import ipdb; ipdb.set_trace()
-                pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0)
-                pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9d[:,:6])[0]
-                pred_pose[:3,3] = pred_pose_9d[0,6:]
-                try:
-                    new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
-                    #import ipdb; ipdb.set_trace()
-                    if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
-                        curr_overlap_area_threshold = overlap_area_threshold
-                    else:
-                        curr_overlap_area_threshold = overlap_area_threshold * 0.5
-                    downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
-                    overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
-                    if not overlap:
-                        Log.yellow("no overlap!")
-                        retry += 1
-                        retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
-                        continue
-                    history_indices.append(new_scan_points_indices)
-                except Exception as e:
-                    Log.error(f"Error in scene {scene_path}, {e}")
-                    print("current pose: ", pred_pose)
-                    print("curr_pred_cr: ", last_pred_cr)
-                    retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
-                    retry += 1
-                    continue
-                if new_target_pts.shape[0] == 0:
-                    Log.red("no pts in new target")
-                    retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
-                    retry += 1
-                    continue
-                pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
-                Log.yellow(f"{pred_cr}, {last_pred_cr}, max: , {data['seq_max_coverage_rate']}")
-                if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
-                    print("max coverage rate reached!: ", pred_cr)
-                pred_cr_seq.append(pred_cr)
-                scanned_view_pts.append(new_target_pts)
-                input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)]
-                combined_scanned_pts = np.vstack(scanned_view_pts)
-                voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, voxel_threshold)
-                random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
-                input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)
-                last_pred_cr = pred_cr
-                pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
-                Log.info(f"delta pts num:,{pts_num - last_pts_num },{pts_num}, {last_pts_num}")
-                if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
-                    retry += 1
-                    retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
-                    Log.red(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
-                elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2:
-                    success += 1
-                    Log.success(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
-                last_pts_num = pts_num
-                break
+            pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9d[:,:6])[0]
+            pred_pose[:3,3] = pred_pose_9d[0,6:]
+            try:
+                new_target_pts_world, new_pts_world = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, model_points_normals, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose, require_full_scene=True)
+            except Exception as e:
+                Log.warning(f"Error in scene {scene_path}, {e}")
+                print("current pose: ", pred_pose)
+                print("curr_pred_cr: ", last_pred_cr)
+                retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
+                retry += 1
+                continue
+            pred_cr = self.compute_coverage_rate(scanned_view_pts, new_target_pts_world, down_sampled_model_pts, threshold=voxel_threshold)
+            print(pred_cr, last_pred_cr, " max: ", data["max_coverage_rate"])
+            if pred_cr >= data["max_coverage_rate"]:
+                print("max coverage rate reached!")
+            if pred_cr <= last_pred_cr + cr_increase_threshold:
+                retry += 1
+                retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
+                continue
+            retry = 0
+            pred_cr_seq.append(pred_cr)
+            scanned_view_pts.append(new_target_pts_world)
+            down_sampled_new_pts_world = PtsUtil.random_downsample_point_cloud(new_pts_world, input_pts_N)
+            new_pts_world_aug = np.hstack([down_sampled_new_pts_world, np.ones((down_sampled_new_pts_world.shape[0], 1))])
+            new_pts = np.dot(np.linalg.inv(first_frame_to_world.cpu()), new_pts_world_aug.T).T[:,:3]
+            new_pts_tensor = torch.tensor(new_pts, dtype=torch.float32).unsqueeze(0).to(self.device)
+            input_data["scanned_pts"] = [torch.cat([input_data["scanned_pts"][0] , new_pts_tensor], dim=0)]
+            input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)]
+            combined_scanned_views_pts = np.concatenate(input_data["scanned_pts"][0].tolist(), axis=0)
+            voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_views_pts, 0.002)
+            random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
+            input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)
+            last_pred_cr = pred_cr
+        input_data["scanned_pts"] = input_data["scanned_pts"][0].cpu().numpy().tolist()
         input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()
         result = {
             "pred_pose_9d_seq": input_data["scanned_n_to_world_pose_9d"],
-            "combined_scanned_pts": input_data["combined_scanned_pts"],
+            "pts_seq": input_data["scanned_pts"],
             "target_pts_seq": scanned_view_pts,
             "coverage_rate_seq": pred_cr_seq,
-            "max_coverage_rate": data["seq_max_coverage_rate"],
+            "max_coverage_rate": data["max_coverage_rate"][0],
             "pred_max_coverage_rate": max(pred_cr_seq),
             "scene_name": scene_name,
             "retry_no_pts_pose": retry_no_pts_pose,
             "retry_duplication_pose": retry_duplication_pose,
-            "retry_overlap_pose": retry_overlap_pose,
-            "best_seq_len": data["best_seq_len"],
+            "best_seq_len": data["best_seq_len"][0],
         }
         self.stat_result[scene_name] = {
+            "max_coverage_rate": data["max_coverage_rate"][0],
+            "success_rate": max(pred_cr_seq)/ data["max_coverage_rate"][0],
             "coverage_rate_seq": pred_cr_seq,
             "pred_max_coverage_rate": max(pred_cr_seq),
             "pred_seq_len": len(pred_cr_seq),
         }
-        print('success rate: ', max(pred_cr_seq))
+        print('success rate: ', max(pred_cr_seq) / data["max_coverage_rate"][0])
         return result
@@ -263,14 +198,7 @@ class Inferencer(Runner):
         combined_point_cloud = np.vstack(new_scanned_view_pts)
         down_sampled_combined_point_cloud = PtsUtil.voxel_downsample_point_cloud(combined_point_cloud,threshold)
         return ReconstructionUtil.compute_coverage_rate(model_pts, down_sampled_combined_point_cloud, threshold)
-    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
-        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
-        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
-        idx_sort = np.argsort(inverse)
-        idx_unique = idx_sort[np.cumsum(counts)-counts]
-        downsampled_points = point_cloud[idx_unique]
-        return downsampled_points, inverse
     def save_inference_result(self, dataset_name, scene_name, output):
         dataset_dir = os.path.join(self.output_dir, dataset_name)
@@ -278,7 +206,7 @@ class Inferencer(Runner):
             os.makedirs(dataset_dir)
         output_path = os.path.join(dataset_dir, f"{scene_name}.pkl")
         pickle.dump(output, open(output_path, "wb"))
-        with open(self.stat_result_path, "w") as f:
+        with open(os.path.join(dataset_dir, "stat.json"), "w") as f:
             json.dump(self.stat_result, f)

View File

@@ -24,6 +24,8 @@ class DataLoadUtil:
         for channel in float_channels:
             channel_data = exr_file.channel(channel)
             img_data.append(np.frombuffer(channel_data, dtype=np.float16).reshape((height, width)))
+        # combine the channels into a single (height, width, 3) RGB image
         img = np.stack(img_data, axis=-1)
         return img

View File

@@ -14,27 +14,38 @@ class PtsUtil:
             downsampled_points = point_cloud[idx_unique]
             return downsampled_points, idx_unique
         else:
-            unique_voxels = np.unique(voxel_indices, axis=0, return_inverse=True)
-            return unique_voxels[0]*voxel_size
+            import ipdb; ipdb.set_trace()
+            unique_voxels = np.unique(voxel_indices, axis=0, return_inverse=False)
+            return unique_voxels*voxel_size
     @staticmethod
-    def voxel_downsample_point_cloud_random(point_cloud, voxel_size=0.005, require_idx=False):
-        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
-        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
-        idx_sort = np.argsort(inverse)
-        idx_unique = idx_sort[np.cumsum(counts)-counts]
-        downsampled_points = point_cloud[idx_unique]
-        if require_idx:
-            return downsampled_points, inverse
-        return downsampled_points
+    def voxel_downsample_point_cloud_o3d(point_cloud, voxel_size=0.005):
+        pcd = o3d.geometry.PointCloud()
+        pcd.points = o3d.utility.Vector3dVector(point_cloud)
+        pcd = pcd.voxel_down_sample(voxel_size)
+        return np.asarray(pcd.points)
     @staticmethod
-    def random_downsample_point_cloud(point_cloud, num_points, require_idx=False):
+    def voxel_downsample_point_cloud_and_trace_o3d(point_cloud, voxel_size=0.005):
+        pcd = o3d.geometry.PointCloud()
+        pcd.points = o3d.utility.Vector3dVector(point_cloud)
+        max_bound = pcd.get_max_bound()
+        min_bound = pcd.get_min_bound()
+        pcd = pcd.voxel_down_sample_and_trace(voxel_size, max_bound, min_bound, True)
+        return np.asarray(pcd.points)
+    @staticmethod
+    def random_downsample_point_cloud(point_cloud, num_points, require_idx=False, replace=True):
         if point_cloud.shape[0] == 0:
             if require_idx:
                 return point_cloud, np.array([])
             return point_cloud
-        idx = np.random.choice(len(point_cloud), num_points, replace=True)
+        if not replace and num_points > len(point_cloud):
+            if require_idx:
+                return point_cloud, np.arange(len(point_cloud))
+            return point_cloud
+        idx = np.random.choice(len(point_cloud), num_points, replace=replace)
         if require_idx:
             return point_cloud[idx], idx
         return point_cloud[idx]

View File

@@ -32,15 +32,13 @@ class ReconstructionUtil:
     @staticmethod
-    def check_overlap(new_point_cloud, combined_point_cloud, overlap_area_threshold=25, voxel_size=0.01, require_new_added_pts_num=False):
+    def check_overlap(new_point_cloud, combined_point_cloud, overlap_area_threshold=25, voxel_size=0.01):
         kdtree = cKDTree(combined_point_cloud)
         distances, _ = kdtree.query(new_point_cloud)
-        overlapping_points_num = np.sum(distances < voxel_size*2)
+        overlapping_points = np.sum(distances < voxel_size*2)
         cm = 0.01
         voxel_size_cm = voxel_size / cm
-        overlap_area = overlapping_points_num * voxel_size_cm * voxel_size_cm
-        if require_new_added_pts_num:
-            return overlap_area > overlap_area_threshold, len(new_point_cloud)-np.sum(distances < voxel_size*1.2)
+        overlap_area = overlapping_points * voxel_size_cm * voxel_size_cm
         return overlap_area > overlap_area_threshold

View File

@@ -1,75 +1,16 @@
 import os
 import json
-import time
 import subprocess
 import tempfile
 import shutil
-import numpy as np
 from utils.data_load import DataLoadUtil
 from utils.reconstruction import ReconstructionUtil
 from utils.pts import PtsUtil
 class RenderUtil:
-    target_mask_label = (0, 255, 0)
-    display_table_mask_label = (0, 0, 255)
-    random_downsample_N = 32768
-    min_z = 0.2
-    max_z = 0.5
     @staticmethod
-    def get_world_points_and_normal(depth, mask, normal, cam_intrinsic, cam_extrinsic, random_downsample_N):
-        z = depth[mask]
-        i, j = np.nonzero(mask)
-        x = (j - cam_intrinsic[0, 2]) * z / cam_intrinsic[0, 0]
-        y = (i - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]
-        points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
-        normal_camera = normal[mask].reshape(-1, 3)
-        sampled_target_points, idx = PtsUtil.random_downsample_point_cloud(
-            points_camera, random_downsample_N, require_idx=True
-        )
-        if len(sampled_target_points) == 0:
-            return np.zeros((0, 3)), np.zeros((0, 3))
-        sampled_normal_camera = normal_camera[idx]
-        points_camera_aug = np.concatenate((sampled_target_points, np.ones((sampled_target_points.shape[0], 1))), axis=-1)
-        points_camera_world = np.dot(cam_extrinsic, points_camera_aug.T).T[:, :3]
-        return points_camera_world, sampled_normal_camera
-    @staticmethod
-    def get_world_points(depth, mask, cam_intrinsic, cam_extrinsic, random_downsample_N):
-        z = depth[mask]
-        i, j = np.nonzero(mask)
-        x = (j - cam_intrinsic[0, 2]) * z / cam_intrinsic[0, 0]
-        y = (i - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]
-        points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
-        sampled_target_points = PtsUtil.random_downsample_point_cloud(
-            points_camera, random_downsample_N
-        )
-        points_camera_aug = np.concatenate((sampled_target_points, np.ones((sampled_target_points.shape[0], 1))), axis=-1)
-        points_camera_world = np.dot(cam_extrinsic, points_camera_aug.T).T[:, :3]
-        return points_camera_world
-    @staticmethod
-    def get_scan_points_indices(scan_points, mask, display_table_mask_label, cam_intrinsic, cam_extrinsic):
-        scan_points_homogeneous = np.hstack((scan_points, np.ones((scan_points.shape[0], 1))))
-        points_camera = np.dot(np.linalg.inv(cam_extrinsic), scan_points_homogeneous.T).T[:, :3]
-        points_image_homogeneous = np.dot(cam_intrinsic, points_camera.T).T
-        points_image_homogeneous /= points_image_homogeneous[:, 2:]
-        pixel_x = points_image_homogeneous[:, 0].astype(int)
-        pixel_y = points_image_homogeneous[:, 1].astype(int)
-        h, w = mask.shape[:2]
-        valid_indices = (pixel_x >= 0) & (pixel_x < w) & (pixel_y >= 0) & (pixel_y < h)
-        mask_colors = mask[pixel_y[valid_indices], pixel_x[valid_indices]]
-        selected_points_indices = np.where((mask_colors == display_table_mask_label).all(axis=-1))[0]
-        selected_points_indices = np.where(valid_indices)[0][selected_points_indices]
-        return selected_points_indices
-    @staticmethod
-    def render_pts(cam_pose, scene_path, script_path, scan_points, voxel_threshold=0.005, filter_degree=75, nO_to_nL_pose=None, require_full_scene=False):
+    def render_pts(cam_pose, scene_path, script_path, model_points_normals, voxel_threshold=0.005, filter_degree=75, nO_to_nL_pose=None, require_full_scene=False):
         nO_to_world_pose = DataLoadUtil.get_real_cam_O_from_cam_L(cam_pose, nO_to_nL_pose, scene_path=scene_path)
@@ -83,54 +24,29 @@ class RenderUtil:
         shutil.copy(scene_info_path, os.path.join(temp_dir, "scene_info.json"))
         params_data_path = os.path.join(temp_dir, "params.json")
         with open(params_data_path, 'w') as f:
             json.dump(params, f)
         result = subprocess.run([
-            '/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', script_path, '--', temp_dir
+            'blender', '-b', '-P', script_path, '--', temp_dir
         ], capture_output=True, text=True)
-        #print(result)
-        #import ipdb; ipdb.set_trace()
+        if result.returncode != 0:
+            print("Blender script failed:")
+            print(result.stderr)
+            return None
         path = os.path.join(temp_dir, "tmp")
-        cam_info = DataLoadUtil.load_cam_info(path, binocular=True)
-        depth_L, depth_R = DataLoadUtil.load_depth(
-            path, cam_info["near_plane"],
-            cam_info["far_plane"],
-            binocular=True
-        )
-        mask_L, mask_R = DataLoadUtil.load_seg(path, binocular=True)
-        normal_L = DataLoadUtil.load_normal(path, binocular=True, left_only=True)
-        ''' target points '''
-        mask_img_L = mask_L
-        mask_img_R = mask_R
-        target_mask_img_L = (mask_L == RenderUtil.target_mask_label).all(axis=-1)
-        target_mask_img_R = (mask_R == RenderUtil.target_mask_label).all(axis=-1)
-        sampled_target_points_L, sampled_target_normal_L = RenderUtil.get_world_points_and_normal(depth_L,target_mask_img_L,normal_L, cam_info["cam_intrinsic"], cam_info["cam_to_world"], RenderUtil.random_downsample_N)
-        sampled_target_points_R = RenderUtil.get_world_points(depth_R, target_mask_img_R, cam_info["cam_intrinsic"], cam_info["cam_to_world_R"], RenderUtil.random_downsample_N )
-        has_points = sampled_target_points_L.shape[0] > 0 and sampled_target_points_R.shape[0] > 0
-        if has_points:
-            target_points, overlap_idx = PtsUtil.get_overlapping_points(
-                sampled_target_points_L, sampled_target_points_R, voxel_threshold, require_idx=True
-            )
-            sampled_target_normal_L = sampled_target_normal_L[overlap_idx]
-        if has_points:
-            has_points = target_points.shape[0] > 0
-        if has_points:
-            target_points, target_normals = PtsUtil.filter_points(
-                target_points, sampled_target_normal_L, cam_info["cam_to_world"], theta_limit = filter_degree, z_range=(RenderUtil.min_z, RenderUtil.max_z)
-            )
-        scan_points_indices_L = RenderUtil.get_scan_points_indices(scan_points, mask_img_L, RenderUtil.display_table_mask_label, cam_info["cam_intrinsic"], cam_info["cam_to_world"])
-        scan_points_indices_R = RenderUtil.get_scan_points_indices(scan_points, mask_img_R, RenderUtil.display_table_mask_label, cam_info["cam_intrinsic"], cam_info["cam_to_world_R"])
-        scan_points_indices = np.intersect1d(scan_points_indices_L, scan_points_indices_R)
-        if not has_points:
-            target_points = np.zeros((0, 3))
-            target_normals = np.zeros((0, 3))
-        #import ipdb; ipdb.set_trace()
-        return target_points, target_normals, scan_points_indices
+        point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(path, binocular=True)
+        cam_params = DataLoadUtil.load_cam_info(path, binocular=True)
+        ''' TODO: old code: filter_points api is changed, need to update the code '''
+        filtered_point_cloud = PtsUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=filter_degree)
+        full_scene_point_cloud = None
+        if require_full_scene:
+            depth_L, depth_R = DataLoadUtil.load_depth(path, cam_params['near_plane'], cam_params['far_plane'], binocular=True)
+            point_cloud_L = DataLoadUtil.get_point_cloud(depth_L, cam_params['cam_intrinsic'], cam_params['cam_to_world'])['points_world']
+            point_cloud_R = DataLoadUtil.get_point_cloud(depth_R, cam_params['cam_intrinsic'], cam_params['cam_to_world_R'])['points_world']
+            point_cloud_L = PtsUtil.random_downsample_point_cloud(point_cloud_L, 65536)
+            point_cloud_R = PtsUtil.random_downsample_point_cloud(point_cloud_R, 65536)
+            full_scene_point_cloud = PtsUtil.get_overlapping_points(point_cloud_L, point_cloud_R)
+        return filtered_point_cloud, full_scene_point_cloud

View File

@@ -7,7 +7,6 @@ import trimesh
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from utils.data_load import DataLoadUtil
 from utils.pts import PtsUtil
-from utils.pose import PoseUtil
 class visualizeUtil:
@@ -34,22 +33,7 @@ class visualizeUtil:
         all_cam_axis = np.array(all_cam_axis).reshape(-1, 3)
         np.savetxt(os.path.join(output_dir, "all_cam_pos.txt"), all_cam_pos)
         np.savetxt(os.path.join(output_dir, "all_cam_axis.txt"), all_cam_axis)
-    @staticmethod
-    def get_cam_pose_and_cam_axis(cam_pose, is_6d_pose):
-        if is_6d_pose:
-            matrix_cam_pose = np.eye(4)
-            matrix_cam_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(cam_pose[:6])
-            matrix_cam_pose[:3, 3] = cam_pose[6:]
-        else:
-            matrix_cam_pose = cam_pose
-        cam_pos = matrix_cam_pose[:3, 3]
-        cam_axis = matrix_cam_pose[:3, 2]
-        num_samples = 10
-        sample_points = [cam_pos + 0.02*t * cam_axis for t in range(num_samples)]
-        sample_points = np.array(sample_points)
-        return cam_pos, sample_points
     @staticmethod
     def save_all_combined_pts(root, scene, output_dir):
         length = DataLoadUtil.get_scene_seq_length(root, scene)