14 Commits

SHA1 Message Date
7c7f071f95 update 2025-04-12 04:05:51 +08:00
1a0e3c8042 sim control 2025-04-09 15:17:24 +08:00
2fcc650eb7 solve conflicts 2025-03-13 14:49:35 +08:00
b20fa8bb75 update strong pointnet++ 2025-03-13 14:48:15 +08:00
d7fb64ed13 update strong p++ 2025-01-23 08:58:10 +00:00
5a03659112 update inference server 2025-01-07 19:32:02 +08:00
fca984e76b Merge branch 'ab_global_only' of http://git.hofee.top/hofee/nbv_reconstruction into ab_global_only 2025-01-05 23:57:43 +08:00
dec67e8255 upd inference 2025-01-05 23:57:33 +08:00
1535a48a3f upd cluster inference 2025-01-05 15:50:04 +00:00
9c2625b11e upd 2024-12-31 02:52:46 +08:00
2dfb6c57ce upd 2024-12-31 02:51:42 +08:00
88d44f020e train pointnet++ 2024-12-30 14:00:53 +00:00
34548c64a3 deploy pointnet++ finished 2024-12-28 19:50:22 +00:00
47ea0ac434 deploy pointnet++ again 2024-12-28 19:38:27 +00:00
46 changed files with 969 additions and 1823 deletions

app_sim.py Normal file

@@ -0,0 +1,11 @@
from PytorchBoot.application import PytorchBootApplication
from runners.simulator import Simulator
@PytorchBootApplication("sim")
class SimulateApp:
@staticmethod
def start():
simulator = Simulator("configs/local/simulation_config.yaml")
simulator.run("create")
simulator.run("simulate")

beans/predict_result.py Normal file

@@ -0,0 +1,162 @@
import numpy as np
from sklearn.cluster import DBSCAN
class PredictResult:
def __init__(self, raw_predict_result, input_pts=None, cluster_params=dict(eps=0.5, min_samples=2)):
self.input_pts = input_pts
self.cluster_params = cluster_params
self.sampled_9d_pose = raw_predict_result
self.sampled_matrix_pose = self.get_sampled_matrix_pose()
self.distance_matrix = self.calculate_distance_matrix()
self.clusters = self.get_cluster_result()
self.candidate_matrix_poses = self.get_candidate_poses()
self.candidate_9d_poses = [np.concatenate((self.matrix_to_rotation_6d_numpy(matrix[:3,:3]), matrix[:3,3].reshape(-1,)), axis=-1) for matrix in self.candidate_matrix_poses]
self.cluster_num = len(self.clusters)
@staticmethod
def rotation_6d_to_matrix_numpy(d6):
a1, a2 = d6[:3], d6[3:]
b1 = a1 / np.linalg.norm(a1)
b2 = a2 - np.dot(b1, a2) * b1
b2 = b2 / np.linalg.norm(b2)
b3 = np.cross(b1, b2)
return np.stack((b1, b2, b3), axis=-2)
@staticmethod
def matrix_to_rotation_6d_numpy(matrix):
return np.copy(matrix[:2, :]).reshape((6,))
def __str__(self):
info = "Predict Result:\n"
info += f" Predicted pose number: {len(self.sampled_9d_pose)}\n"
info += f" Cluster number: {self.cluster_num}\n"
for i, cluster in enumerate(self.clusters):
info += f" - Cluster {i} size: {len(cluster)}\n"
max_distance = np.max(self.distance_matrix[self.distance_matrix != 0])
min_distance = np.min(self.distance_matrix[self.distance_matrix != 0])
info += f" Max distance: {max_distance}\n"
info += f" Min distance: {min_distance}\n"
return info
def get_sampled_matrix_pose(self):
sampled_matrix_pose = []
for pose in self.sampled_9d_pose:
rotation = pose[:6]
translation = pose[6:]
pose = self.rotation_6d_to_matrix_numpy(rotation)
pose = np.concatenate((pose, translation.reshape(-1, 1)), axis=-1)
pose = np.concatenate((pose, np.array([[0, 0, 0, 1]])), axis=-2)
sampled_matrix_pose.append(pose)
return np.array(sampled_matrix_pose)
def rotation_distance(self, R1, R2):
R = np.dot(R1.T, R2)
trace = np.trace(R)
angle = np.arccos(np.clip((trace - 1) / 2, -1, 1))
return angle
def calculate_distance_matrix(self):
n = len(self.sampled_matrix_pose)
dist_matrix = np.zeros((n, n))
for i in range(n):
for j in range(n):
dist_matrix[i, j] = self.rotation_distance(self.sampled_matrix_pose[i][:3, :3], self.sampled_matrix_pose[j][:3, :3])
return dist_matrix
def cluster_rotations(self):
clustering = DBSCAN(eps=self.cluster_params['eps'], min_samples=self.cluster_params['min_samples'], metric='precomputed')
labels = clustering.fit_predict(self.distance_matrix)
return labels
def get_cluster_result(self):
labels = self.cluster_rotations()
cluster_num = len(set(labels)) - (1 if -1 in labels else 0)
clusters = []
for _ in range(cluster_num):
clusters.append([])
for matrix_pose, label in zip(self.sampled_matrix_pose, labels):
if label != -1:
clusters[label].append(matrix_pose)
clusters.sort(key=len, reverse=True)
return clusters
def get_center_matrix_pose_from_cluster(self, cluster):
min_total_distance = float('inf')
center_matrix_pose = None
for matrix_pose in cluster:
total_distance = 0
for other_matrix_pose in cluster:
rot_distance = self.rotation_distance(matrix_pose[:3, :3], other_matrix_pose[:3, :3])
total_distance += rot_distance
if total_distance < min_total_distance:
min_total_distance = total_distance
center_matrix_pose = matrix_pose
return center_matrix_pose
def get_candidate_poses(self):
candidate_poses = []
for cluster in self.clusters:
candidate_poses.append(self.get_center_matrix_pose_from_cluster(cluster))
return candidate_poses
def visualize(self):
import plotly.graph_objects as go
fig = go.Figure()
if self.input_pts is not None:
fig.add_trace(go.Scatter3d(
x=self.input_pts[:, 0], y=self.input_pts[:, 1], z=self.input_pts[:, 2],
mode='markers', marker=dict(size=1, color='gray', opacity=0.5), name='Input Points'
))
colors = ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg']
for i, cluster in enumerate(self.clusters):
color = colors[i]
candidate_pose = self.candidate_matrix_poses[i]
origin_candidate = candidate_pose[:3, 3]
z_axis_candidate = candidate_pose[:3, 2]
for pose in cluster:
origin = pose[:3, 3]
z_axis = pose[:3, 2]
fig.add_trace(go.Cone(
x=[origin[0]], y=[origin[1]], z=[origin[2]],
u=[z_axis[0]], v=[z_axis[1]], w=[z_axis[2]],
colorscale=color,
sizemode="absolute", sizeref=0.05, anchor="tail", showscale=False
))
fig.add_trace(go.Cone(
x=[origin_candidate[0]], y=[origin_candidate[1]], z=[origin_candidate[2]],
u=[z_axis_candidate[0]], v=[z_axis_candidate[1]], w=[z_axis_candidate[2]],
colorscale=color,
sizemode="absolute", sizeref=0.1, anchor="tail", showscale=False
))
fig.update_layout(
title="Clustered Poses and Input Points",
scene=dict(
xaxis_title='X',
yaxis_title='Y',
zaxis_title='Z'
),
margin=dict(l=0, r=0, b=0, t=40),
scene_camera=dict(eye=dict(x=1.25, y=1.25, z=1.25))
)
fig.show()
if __name__ == "__main__":
step = 0
raw_predict_result = np.load(f"inference_result_pack/inference_result_pack/{step}/all_pred_pose_9d.npy")
input_pts = np.loadtxt(f"inference_result_pack/inference_result_pack/{step}/input_pts.txt")
print(raw_predict_result.shape)
predict_result = PredictResult(raw_predict_result, input_pts, cluster_params=dict(eps=0.25, min_samples=3))
print(predict_result)
print(len(predict_result.candidate_matrix_poses))
print(predict_result.distance_matrix)
#import ipdb; ipdb.set_trace()
predict_result.visualize()
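
A minimal, self-contained sketch of the two core ideas in the PredictResult class above: the 6D rotation representation (keep the first two rows of R, re-orthonormalize with Gram-Schmidt on decode) and DBSCAN over a precomputed geodesic rotation-distance matrix. The helper names and test data here are illustrative, not part of the repository API.

```python
import numpy as np
from sklearn.cluster import DBSCAN

def rot_to_6d(R):
    return R[:2, :].reshape(6)               # keep the first two rows

def rot_from_6d(d6):
    a1, a2 = d6[:3], d6[3:]
    b1 = a1 / np.linalg.norm(a1)
    b2 = a2 - np.dot(b1, a2) * b1            # Gram-Schmidt step
    b2 = b2 / np.linalg.norm(b2)
    b3 = np.cross(b1, b2)
    return np.stack((b1, b2, b3), axis=-2)

def geodesic(R1, R2):
    # angle of the relative rotation R1^T R2, clipped for numerical safety
    return np.arccos(np.clip((np.trace(R1.T @ R2) - 1) / 2, -1, 1))

rng = np.random.default_rng(0)
# two tight bundles of rotations: around the identity and around a 90-degree turn
base = [np.eye(3), rot_from_6d(np.array([0, 0, -1, 0, 1, 0], dtype=float))]
Rs = [rot_from_6d(rot_to_6d(B) + 0.01 * rng.standard_normal(6))
      for B in base for _ in range(5)]
D = np.array([[geodesic(a, b) for b in Rs] for a in Rs])
labels = DBSCAN(eps=0.5, min_samples=2, metric="precomputed").fit_predict(D)
print(labels)  # expected: two clusters, e.g. [0 0 0 0 0 1 1 1 1 1]
```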


@@ -6,16 +6,16 @@ runner:
cuda_visible_devices: "0,1,2,3,4,5,6,7"
experiment:
name: train_ab_global_only
name: train_ab_global_only_p++_wp
root_dir: "experiments"
epoch: -1 # -1 stands for last epoch
epoch: 922 # -1 stands for last epoch
test:
dataset_list:
- OmniObject3d_test
blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py"
output_dir: "/media/hofee/data/results/nbv_rec_inference/global_only_ycb_241204"
output_dir: "/media/hofee/data/data/p++_wp"
pipeline: nbv_reconstruction_pipeline
voxel_size: 0.003
min_new_area: 1.0
@@ -34,8 +34,8 @@ dataset:
# load_from_preprocess: True
OmniObject3d_test:
root_dir: "/media/hofee/data/results/ycb_preprocessed_dataset"
model_dir: "/media/hofee/data/data/ycb_obj"
root_dir: "/media/hofee/data/data/new_testset_output"
model_dir: "/media/hofee/data/data/scaled_object_meshes"
source: seq_reconstruction_dataset_preprocessed
# split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt"
type: test
@@ -52,7 +52,7 @@ dataset:
pipeline:
nbv_reconstruction_pipeline:
modules:
pts_encoder: pointnet_encoder
pts_encoder: pointnet++_encoder
seq_encoder: transformer_seq_encoder
pose_encoder: pose_encoder
view_finder: gf_view_finder
@@ -60,6 +60,10 @@ pipeline:
global_scanned_feat: True
module:
pointnet++_encoder:
in_dim: 3
params_name: light
pointnet_encoder:
in_dim: 3
out_dim: 1024


@@ -0,0 +1,36 @@
runner:
general:
seed: 0
device: cuda
cuda_visible_devices: "0,1,2,3,4,5,6,7"
experiment:
name: simulation_debug
root_dir: "experiments"
simulation:
robot:
urdf_path: "assets/franka_panda/panda.urdf"
initial_position: [0, 0, 0] # robot arm base position
initial_orientation: [0, 0, 0] # robot arm base orientation (Euler angles)
turntable:
radius: 0.3 # turntable radius (meters)
height: 0.1 # turntable height
center_position: [0.8, 0, 0.4]
target:
obj_dir: /media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/assets/object_meshes
obj_name: "google_scan-box_0185"
scale: 1.0 # scale factor
mass: 0.1 # mass (kg)
rgba_color: [0.8, 0.8, 0.8, 1.0] # target object color
camera:
width: 640
height: 480
fov: 40
near: 0.01
far: 5.0
displaytable:
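
The simulation config above maps naturally onto a PyBullet-style setup. Below is a hedged sketch of how those fields could be consumed, assuming a PyBullet backend; the repository's Simulator class may wire things up differently, and the camera eye position is invented for illustration.

```python
import pybullet as p

client = p.connect(p.DIRECT)  # headless physics server
robot = p.loadURDF("assets/franka_panda/panda.urdf",
                   basePosition=[0, 0, 0],                               # initial_position
                   baseOrientation=p.getQuaternionFromEuler([0, 0, 0]))  # initial_orientation

# Camera from the config: 640x480, fov 40, near 0.01, far 5.0.
proj = p.computeProjectionMatrixFOV(fov=40, aspect=640 / 480, nearVal=0.01, farVal=5.0)
view = p.computeViewMatrix(cameraEyePosition=[1.2, 0.0, 0.8],       # illustrative
                           cameraTargetPosition=[0.8, 0, 0.4],      # turntable center_position
                           cameraUpVector=[0, 0, 1])
width, height, rgb, depth, seg = p.getCameraImage(640, 480, view, proj)
p.disconnect(client)
```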


@@ -15,13 +15,13 @@ runner:
overlap_area_threshold: 30
compute_with_normal: False
scan_points_threshold: 10
overwrite: False
overwrite: False
seq_num: 10
dataset_list:
- OmniObject3d
datasets:
OmniObject3d:
root_dir: /media/hofee/data/results/ycb_view_data
root_dir: /media/hofee/data/data/test_bottle/view
from: 0
to: -1 # ..-1 means end


@@ -8,16 +8,16 @@ runner:
root_dir: experiments
generate:
port: 5002
from: 1
from: 0
to: 50 # -1 means all
object_dir: /media/hofee/data/data/ycb_obj
object_dir: /media/hofee/data/data/test_bottle/bottle_mesh
table_model_path: /media/hofee/data/data/others/table.obj
output_dir: /media/hofee/data/results/ycb_view_data
output_dir: /media/hofee/data/data/test_bottle/view
binocular_vision: true
plane_size: 10
max_views: 512
min_views: 128
random_view_ratio: 0.02
random_view_ratio: 0.002
min_cam_table_included_degree: 20
max_diag: 0.7
min_diag: 0.01
@@ -34,7 +34,7 @@ runner:
max_y: 0.05
min_z: 0.01
max_z: 0.01
random_rotation_ratio: 0.3
random_rotation_ratio: 0.0
random_objects:
num: 4
cluster: 0.9


@@ -7,19 +7,19 @@ runner:
parallel: False
experiment:
name: train_ab_global_only
name: train_ab_global_only_with_wp_p++_strong
root_dir: "experiments"
use_checkpoint: True
use_checkpoint: False
epoch: -1 # -1 stands for last epoch
max_epochs: 5000
save_checkpoint_interval: 1
test_first: True
test_first: False
train:
optimizer:
type: Adam
lr: 0.0001
losses:
losses:
- gf_loss
dataset: OmniObject3d_train
test:
@@ -39,7 +39,7 @@ dataset:
type: train
cache: True
ratio: 1
batch_size: 80
batch_size: 64
num_workers: 128
pts_num: 8192
load_from_preprocess: True
@@ -80,7 +80,7 @@ dataset:
pipeline:
nbv_reconstruction_pipeline:
modules:
pts_encoder: pointnet_encoder
pts_encoder: pointnet++_encoder
seq_encoder: transformer_seq_encoder
pose_encoder: pose_encoder
view_finder: gf_view_finder
@@ -96,6 +96,10 @@ module:
global_feat: True
feature_transform: False
pointnet++_encoder:
in_dim: 3
params_name: strong
transformer_seq_encoder:
embed_dim: 256
num_heads: 4
@@ -106,7 +110,7 @@ module:
gf_view_finder:
t_feat_dim: 128
pose_feat_dim: 256
main_feat_dim: 2048
main_feat_dim: 5120
regression_head: Rx_Ry_and_T
pose_mode: rot_matrix
per_point_feature: False


@@ -7,6 +7,7 @@ from PytorchBoot.utils.log_util import Log
import torch
import os
import sys
import time
sys.path.append(r"/data/hofee/project/nbv_rec/nbv_reconstruction")
@@ -114,8 +115,13 @@ class NBVReconstructionDataset(BaseDataset):
except Exception as e:
Log.error(f"Save cache failed: {e}")
def voxel_downsample_with_mask(self, pts, voxel_size):
pass
def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
idx_sort = np.argsort(inverse)
idx_unique = idx_sort[np.cumsum(counts)-counts]
downsampled_points = point_cloud[idx_unique]
return downsampled_points, inverse
def __getitem__(self, index):
@@ -129,6 +135,9 @@ class NBVReconstructionDataset(BaseDataset):
scanned_coverages_rate,
scanned_n_to_world_pose,
) = ([], [], [])
start_time = time.time()
start_indices = [0]
total_points = 0
for view in scanned_views:
frame_idx = view[0]
coverage_rate = view[1]
@@ -150,8 +159,12 @@ class NBVReconstructionDataset(BaseDataset):
n_to_world_trans = n_to_world_pose[:3, 3]
n_to_world_9d = np.concatenate([n_to_world_6d, n_to_world_trans], axis=0)
scanned_n_to_world_pose.append(n_to_world_9d)
total_points += len(downsampled_target_point_cloud)
start_indices.append(total_points)
end_time = time.time()
#Log.info(f"load data time: {end_time - start_time}")
nbv_idx, nbv_coverage_rate = nbv[0], nbv[1]
nbv_path = DataLoadUtil.get_path(self.root_dir, scene_name, nbv_idx)
cam_info = DataLoadUtil.load_cam_info(nbv_path)
@@ -164,14 +177,27 @@ class NBVReconstructionDataset(BaseDataset):
best_to_world_9d = np.concatenate(
[best_to_world_6d, best_to_world_trans], axis=0
)
combined_scanned_views_pts = np.concatenate(scanned_views_pts, axis=0)
voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_views_pts, 0.002)
random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, self.pts_num)
combined_scanned_views_pts = np.concatenate(scanned_views_pts, axis=0)
voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_views_pts, 0.003)
random_downsampled_combined_scanned_pts_np, random_downsample_idx = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, self.pts_num, require_idx=True)
all_idx_unique = np.arange(len(voxel_downsampled_combined_scanned_pts_np))
all_random_downsample_idx = all_idx_unique[random_downsample_idx]
scanned_pts_mask = []
for idx, start_idx in enumerate(start_indices):
if idx == len(start_indices) - 1:
break
end_idx = start_indices[idx+1]
view_inverse = inverse[start_idx:end_idx]
view_unique_downsampled_idx = np.unique(view_inverse)
view_unique_downsampled_idx_set = set(view_unique_downsampled_idx)
mask = np.array([pt_idx in view_unique_downsampled_idx_set for pt_idx in all_random_downsample_idx])
scanned_pts_mask.append(mask)
data_item = {
"scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32), # Ndarray(S x Nv x 3)
"combined_scanned_pts": np.asarray(random_downsampled_combined_scanned_pts_np, dtype=np.float32), # Ndarray(N x 3)
"scanned_pts_mask": np.asarray(scanned_pts_mask, dtype=bool), # Ndarray(S x N); np.bool is deprecated
"scanned_coverage_rate": scanned_coverages_rate, # List(S): Float, range(0, 1)
"scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32), # Ndarray(S x 9)
"best_coverage_rate": nbv_coverage_rate, # Float, range(0, 1)
@@ -197,7 +223,9 @@ class NBVReconstructionDataset(BaseDataset):
collate_data["scanned_n_to_world_pose_9d"] = [
torch.tensor(item["scanned_n_to_world_pose_9d"]) for item in batch
]
collate_data["scanned_pts_mask"] = [
torch.tensor(item["scanned_pts_mask"]) for item in batch
]
''' ------ Fixed Length ------ '''
collate_data["best_to_world_pose_9d"] = torch.stack(
@@ -206,12 +234,14 @@ class NBVReconstructionDataset(BaseDataset):
collate_data["combined_scanned_pts"] = torch.stack(
[torch.tensor(item["combined_scanned_pts"]) for item in batch]
)
for key in batch[0].keys():
if key not in [
"scanned_pts",
"scanned_n_to_world_pose_9d",
"best_to_world_pose_9d",
"combined_scanned_pts",
"scanned_pts_mask",
]:
collate_data[key] = [item[key] for item in batch]
return collate_data
@@ -227,9 +257,9 @@ if __name__ == "__main__":
torch.manual_seed(seed)
np.random.seed(seed)
config = {
"root_dir": "/data/hofee/data/packed_preprocessed_data",
"root_dir": "/data/hofee/nbv_rec_part2_preprocessed",
"source": "nbv_reconstruction_dataset",
"split_file": "/data/hofee/data/OmniObject3d_train.txt",
"split_file": "/data/hofee/data/sample.txt",
"load_from_preprocess": True,
"ratio": 0.5,
"batch_size": 2,

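The new voxel_downsample_with_mapping and the per-view scanned_pts_mask construction above both hinge on np.unique's return_inverse. A numpy-only sketch of the same idea, with made-up point clouds standing in for two scanned views:

```python
import numpy as np

def voxel_downsample_with_mapping(point_cloud, voxel_size=0.003):
    voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
    unique_voxels, inverse, counts = np.unique(
        voxel_indices, axis=0, return_inverse=True, return_counts=True)
    idx_sort = np.argsort(inverse)
    idx_unique = idx_sort[np.cumsum(counts) - counts]  # first point of each voxel
    return point_cloud[idx_unique], inverse

rng = np.random.default_rng(0)
view_a = rng.random((100, 3))
view_b = rng.random((80, 3)) * 0.5           # overlaps part of view_a's volume
combined = np.concatenate([view_a, view_b], axis=0)
down, inverse = voxel_downsample_with_mapping(combined, voxel_size=0.1)

# Which downsampled voxels does view_b touch? (cf. the start_indices loop above)
view_b_voxels = set(np.unique(inverse[100:]))
mask_b = np.array([v in view_b_voxels for v in range(len(down))])
print(len(down), mask_b.sum())
```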

@@ -75,6 +75,8 @@ class NBVReconstructionPipeline(nn.Module):
def forward_test(self, data):
main_feat = self.get_main_feat(data)
repeat_num = data.get("repeat_num", 1)
main_feat = main_feat.repeat(repeat_num, 1)
estimated_delta_rot_9d, in_process_sample = self.view_finder.next_best_view(
main_feat
)
@@ -89,25 +91,49 @@ class NBVReconstructionPipeline(nn.Module):
"scanned_n_to_world_pose_9d"
] # List(B): Tensor(S x 9)
scanned_pts_mask_batch = data["scanned_pts_mask"] # List(B): Tensor(N)
device = next(self.parameters()).device
embedding_list_batch = []
combined_scanned_pts_batch = data["combined_scanned_pts"] # Tensor(B x N x 3)
global_scanned_feat = self.pts_encoder.encode_points(
combined_scanned_pts_batch, require_per_point_feat=False
global_scanned_feat, per_point_feat_batch = self.pts_encoder.encode_points(
combined_scanned_pts_batch, require_per_point_feat=True
) # global_scanned_feat: Tensor(B x Dg)
for scanned_n_to_world_pose_9d in scanned_n_to_world_pose_9d_batch:
scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d.to(device) # Tensor(S x 9)
batch_size = len(scanned_n_to_world_pose_9d_batch)
for i in range(batch_size):
seq_len = len(scanned_n_to_world_pose_9d_batch[i])
scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d_batch[i].to(device) # Tensor(S x 9)
scanned_pts_mask = scanned_pts_mask_batch[i] # Tensor(S x N)
per_point_feat = per_point_feat_batch[i] # Tensor(N x Dp)
partial_point_feat_seq = []
for j in range(seq_len):
partial_per_point_feat = per_point_feat[scanned_pts_mask[j]]
if partial_per_point_feat.shape[0] == 0:
partial_point_feat = torch.zeros(per_point_feat.shape[1], device=device)
else:
partial_point_feat = torch.mean(partial_per_point_feat, dim=0) # Tensor(Dp)
partial_point_feat_seq.append(partial_point_feat)
partial_point_feat_seq = torch.stack(partial_point_feat_seq, dim=0) # Tensor(S x Dp)
pose_feat_seq = self.pose_encoder.encode_pose(scanned_n_to_world_pose_9d) # Tensor(S x Dp)
seq_embedding = pose_feat_seq
seq_embedding = torch.cat([partial_point_feat_seq, pose_feat_seq], dim=-1)
embedding_list_batch.append(seq_embedding) # List(B): Tensor(S x (Dp))
seq_feat = self.seq_encoder.encode_sequence(embedding_list_batch) # Tensor(B x Ds)
main_feat = torch.cat([seq_feat, global_scanned_feat], dim=-1) # Tensor(B x (Ds+Dg))
if torch.isnan(main_feat).any():
for i in range(len(main_feat)):
if torch.isnan(main_feat[i]).any():
scanned_pts_mask = scanned_pts_mask_batch[i]
Log.info(f"scanned_pts_mask shape: {scanned_pts_mask.shape}")
Log.info(f"scanned_pts_mask sum: {scanned_pts_mask.sum()}")
import ipdb
ipdb.set_trace()
Log.error("nan in main_feat", True)
return main_feat
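
A hedged sketch of the per-view feature pooling introduced above: for each view, the global per-point features are mean-pooled over that view's mask, with a zero vector as the fallback when the mask selects no surviving points (torch.mean over an empty tensor returns NaN, which is the failure mode the NaN check above is hunting). Shapes follow the comments in the diff; the tensors here are random stand-ins.

```python
import torch

N, Dp, S = 1024, 128, 4                      # points, feature dim, sequence length
per_point_feat = torch.randn(N, Dp)          # Tensor(N x Dp) from the point encoder
scanned_pts_mask = torch.rand(S, N) > 0.7    # Tensor(S x N), bool

partial_feats = []
for j in range(S):
    sel = per_point_feat[scanned_pts_mask[j]]
    if sel.shape[0] == 0:
        partial_feats.append(torch.zeros(Dp))   # empty view -> zero feature, no NaN
    else:
        partial_feats.append(sel.mean(dim=0))
partial_point_feat_seq = torch.stack(partial_feats)  # Tensor(S x Dp)
assert not torch.isnan(partial_point_feat_seq).any()
```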


@@ -64,11 +64,15 @@ class SeqReconstructionDataset(BaseDataset):
scene_max_cr_idx = 0
frame_len = DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)
for i in range(frame_len):
for i in range(10,frame_len):
path = DataLoadUtil.get_path(self.root_dir, scene_name, i)
pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
print(pts.shape)
if pts.shape[0] == 0:
continue
else:
break
print(i)
datalist.append({
"scene_name": scene_name,
"first_frame": i,
@@ -180,9 +184,9 @@ if __name__ == "__main__":
np.random.seed(seed)
config = {
"root_dir": "/media/hofee/data/results/ycb_view_data",
"root_dir": "/media/hofee/data/data/test_bottle/view",
"source": "seq_reconstruction_dataset",
"split_file": "/media/hofee/data/results/ycb_test.txt",
"split_file": "/media/hofee/data/data/test_bottle/test_bottle.txt",
"load_from_preprocess": True,
"filter_degree": 75,
"num_workers": 0,
@@ -190,7 +194,7 @@ if __name__ == "__main__":
"type": namespace.Mode.TEST,
}
output_dir = "/media/hofee/data/results/ycb_preprocessed_dataset"
output_dir = "/media/hofee/data/data/test_bottle/preprocessed_dataset"
os.makedirs(output_dir, exist_ok=True)
ds = SeqReconstructionDataset(config)


@@ -21,7 +21,7 @@ class SeqReconstructionDatasetPreprocessed(BaseDataset):
super(SeqReconstructionDatasetPreprocessed, self).__init__(config)
self.config = config
self.root_dir = config["root_dir"]
self.real_root_dir = r"/media/hofee/data/results/ycb_view_data"
self.real_root_dir = r"/media/hofee/data/data/new_testset"
self.item_list = os.listdir(self.root_dir)
def __getitem__(self, index):
@@ -66,7 +66,7 @@ if __name__ == "__main__":
load_from_preprocess: True
'''
config = {
"root_dir": "H:\\AI\\Datasets\\packed_test_data",
"root_dir": "/media/hofee/data/data/test_bottle/preprocessed_dataset",
"source": "seq_reconstruction_dataset",
"split_file": "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt",
"load_from_preprocess": True,


@@ -1,4 +0,0 @@
pointnet2/build/
pointnet2/dist/
pointnet2/pointnet2.egg-info/
__pycache__/


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019 Shaoshuai Shi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,51 +0,0 @@
# Pointnet2.PyTorch
* PyTorch implementation of [PointNet++](https://arxiv.org/abs/1706.02413) based on [erikwijmans/Pointnet2_PyTorch](https://github.com/erikwijmans/Pointnet2_PyTorch).
* Faster than the original code, thanks to re-implemented CUDA operations.
## Installation
### Requirements
* Linux (tested on Ubuntu 14.04/16.04)
* Python 3.6+
* PyTorch 1.0
### Install
Install this library by running the following command:
```shell
cd pointnet2
python setup.py install
cd ../
```
## Examples
Here I provide a simple example of using this library for KITTI outdoor foreground point cloud segmentation; you can refer to the paper [PointRCNN](https://arxiv.org/abs/1812.04244) for details of the task description and foreground label generation.
1. Download the training data from [KITTI 3D object detection](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) website and organize the downloaded files as follows:
```
Pointnet2.PyTorch
├── pointnet2
├── tools
│ ├──data
│ │ ├── KITTI
│ │ │ ├── ImageSets
│ │ │ ├── object
│ │ │ │ ├──training
│ │ │ │ ├──calib & velodyne & label_2 & image_2
│ │ train_and_eval.py
```
2. Run the following command to train and evaluate:
```shell
cd tools
python train_and_eval.py --batch_size 8 --epochs 100 --ckpt_save_interval 2
```
## Projects using this repo:
* [PointRCNN](https://github.com/sshaoshuai/PointRCNN): 3D object detector from raw point cloud.
## Acknowledgement
* [charlesq34/pointnet2](https://github.com/charlesq34/pointnet2): Paper author and official code repo.
* [erikwijmans/Pointnet2_PyTorch](https://github.com/erikwijmans/Pointnet2_PyTorch): Initial work of PyTorch implementation of PointNet++.


@@ -1,23 +0,0 @@
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='pointnet2',
ext_modules=[
CUDAExtension('pointnet2_cuda', [
'src/pointnet2_api.cpp',
'src/ball_query.cpp',
'src/ball_query_gpu.cu',
'src/group_points.cpp',
'src/group_points_gpu.cu',
'src/interpolate.cpp',
'src/interpolate_gpu.cu',
'src/sampling.cpp',
'src/sampling_gpu.cu',
],
extra_compile_args={'cxx': ['-g'],
'nvcc': ['-O2']})
],
cmdclass={'build_ext': BuildExtension}
)


@@ -1,28 +0,0 @@
#include <torch/serialize/tensor.h>
#include <vector>
// #include <THC/THC.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "ball_query_gpu.h"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
// extern THCState *state;
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) {
CHECK_INPUT(new_xyz_tensor);
CHECK_INPUT(xyz_tensor);
const float *new_xyz = new_xyz_tensor.data<float>();
const float *xyz = xyz_tensor.data<float>();
int *idx = idx_tensor.data<int>();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx, stream);
return 1;
}


@@ -1,67 +0,0 @@
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int cnt = 0;
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
if (d2 < radius2){
if (cnt == 0){
for (int l = 0; l < nsample; ++l) {
idx[l] = k;
}
}
idx[cnt] = k;
++cnt;
if (cnt >= nsample) break;
}
}
}
void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \
const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
cudaError_t err;
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
ball_query_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
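
A numpy reference for the ball_query kernel deleted above: for each query point, collect up to nsample neighbor indices within radius, padding with the first hit so the output stays rectangular (exactly what the cnt == 0 back-fill loop in the CUDA code does). Single batch for brevity; this is a sketch, not the repository API.

```python
import numpy as np

def ball_query(new_xyz, xyz, radius, nsample):
    # new_xyz: (M, 3) query centers; xyz: (N, 3) source points -> idx: (M, nsample)
    idx = np.zeros((len(new_xyz), nsample), dtype=np.int32)
    for i, q in enumerate(new_xyz):
        hits = np.flatnonzero(((xyz - q) ** 2).sum(axis=1) < radius ** 2)
        if len(hits) == 0:
            continue                     # the kernel leaves the buffer untouched here
        take = hits[:nsample]
        idx[i, :len(take)] = take
        idx[i, len(take):] = take[0]     # pad remaining slots with the first neighbor
    return idx

pts = np.random.rand(200, 3).astype(np.float32)
print(ball_query(pts[:4], pts, radius=0.2, nsample=16).shape)  # (4, 16)
```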


@@ -1,15 +0,0 @@
#ifndef _BALL_QUERY_GPU_H
#define _BALL_QUERY_GPU_H
#include <torch/serialize/tensor.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>
int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor);
void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample,
const float *xyz, const float *new_xyz, int *idx, cudaStream_t stream);
#endif


@@ -1,15 +0,0 @@
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include <cmath>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
#endif


@@ -1,37 +0,0 @@
#include <torch/serialize/tensor.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
// #include <THC/THC.h>
#include "group_points_gpu.h"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
// extern THCState *state;
int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {
float *grad_points = grad_points_tensor.data<float>();
const int *idx = idx_tensor.data<int>();
const float *grad_out = grad_out_tensor.data<float>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points, stream);
return 1;
}
int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) {
const float *points = points_tensor.data<float>();
const int *idx = idx_tensor.data<int>();
float *out = out_tensor.data<float>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out, stream);
return 1;
}


@@ -1,86 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "group_points_gpu.h"
__global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample,
const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int pt_idx = index / nsample;
if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
int sample_idx = index % nsample;
grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]);
}
void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
cudaError_t err;
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
group_points_grad_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, nsample, grad_out, idx, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample,
const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int pt_idx = index / nsample;
if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
int sample_idx = index % nsample;
idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
int in_idx = bs_idx * c * n + c_idx * n + idx[0];
int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
out[out_idx] = points[in_idx];
}
void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx, float *out, cudaStream_t stream) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
cudaError_t err;
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
group_points_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, nsample, points, idx, out);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
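
A torch reference for the group_points kernels deleted above: the forward pass is fancy indexing of (B, C, N) features with a (B, npoints, nsample) index tensor, and the backward kernel's atomicAdd is the matching scatter-add of gradients. A sketch with random data:

```python
import torch

def group_points(points, idx):
    # points: (B, C, N), idx: (B, npoints, nsample) -> (B, C, npoints, nsample)
    B = points.shape[0]
    batch = torch.arange(B).view(B, 1, 1)          # broadcasts against idx
    grouped = points[batch, :, idx.long()]         # (B, npoints, nsample, C)
    return grouped.permute(0, 3, 1, 2).contiguous()

pts = torch.randn(2, 8, 100)
idx = torch.randint(0, 100, (2, 16, 4))
print(group_points(pts, idx).shape)  # torch.Size([2, 8, 16, 4])
```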


@@ -1,22 +0,0 @@
#ifndef _GROUP_POINTS_GPU_H
#define _GROUP_POINTS_GPU_H
#include <torch/serialize/tensor.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);
void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx, float *out, cudaStream_t stream);
int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);
void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream);
#endif


@@ -1,59 +0,0 @@
#include <torch/serialize/tensor.h>
#include <vector>
// #include <THC/THC.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "interpolate_gpu.h"
// extern THCState *state;
void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) {
const float *unknown = unknown_tensor.data<float>();
const float *known = known_tensor.data<float>();
float *dist2 = dist2_tensor.data<float>();
int *idx = idx_tensor.data<int>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx, stream);
}
void three_interpolate_wrapper_fast(int b, int c, int m, int n,
at::Tensor points_tensor,
at::Tensor idx_tensor,
at::Tensor weight_tensor,
at::Tensor out_tensor) {
const float *points = points_tensor.data<float>();
const float *weight = weight_tensor.data<float>();
float *out = out_tensor.data<float>();
const int *idx = idx_tensor.data<int>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out, stream);
}
void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m,
at::Tensor grad_out_tensor,
at::Tensor idx_tensor,
at::Tensor weight_tensor,
at::Tensor grad_points_tensor) {
const float *grad_out = grad_out_tensor.data<float>();
const float *weight = weight_tensor.data<float>();
float *grad_points = grad_points_tensor.data<float>();
const int *idx = idx_tensor.data<int>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points, stream);
}


@@ -1,161 +0,0 @@
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "interpolate_gpu.h"
__global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown,
const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= n) return;
unknown += bs_idx * n * 3 + pt_idx * 3;
known += bs_idx * m * 3;
dist2 += bs_idx * n * 3 + pt_idx * 3;
idx += bs_idx * n * 3 + pt_idx * 3;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
best3 = best2; besti3 = besti2;
best2 = best1; besti2 = besti1;
best1 = d; besti1 = k;
}
else if (d < best2) {
best3 = best2; besti3 = besti2;
best2 = d; besti2 = k;
}
else if (d < best3) {
best3 = d; besti3 = k;
}
}
dist2[0] = best1; dist2[1] = best2; dist2[2] = best3;
idx[0] = besti1; idx[1] = besti2; idx[2] = besti3;
}
void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx, cudaStream_t stream) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_nn_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, unknown, known, dist2, idx);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points,
const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
weight += bs_idx * n * 3 + pt_idx * 3;
points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
out += bs_idx * c * n + c_idx * n;
out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]];
}
void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
const float *points, const int *idx, const float *weight, float *out, cudaStream_t stream) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_interpolate_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, m, n, points, idx, weight, out);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ grad_points) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
grad_out += bs_idx * c * n + c_idx * n + pt_idx;
weight += bs_idx * n * 3 + pt_idx * 3;
grad_points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
}
void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
const int *idx, const float *weight, float *grad_points, cudaStream_t stream) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_interpolate_grad_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, m, grad_out, idx, weight, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
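
A numpy reference for the three_nn / three_interpolate pair deleted above. The weight computation is not part of this file; the standard PointNet++ choice (assumed here) is inverse-squared-distance weights over the 3 nearest known points, normalized to sum to one.

```python
import numpy as np

def three_interpolate(unknown, known, feats):
    # unknown: (N, 3), known: (M, 3), feats: (C, M) -> (C, N)
    d2 = ((unknown[:, None, :] - known[None, :, :]) ** 2).sum(-1)   # (N, M)
    idx = np.argsort(d2, axis=1)[:, :3]                             # 3 nearest, as in three_nn
    w = 1.0 / (np.take_along_axis(d2, idx, axis=1) + 1e-8)          # assumed weighting
    w /= w.sum(axis=1, keepdims=True)
    return (feats[:, idx] * w[None]).sum(-1)                        # (C, N)

known = np.random.rand(16, 3)
unknown = np.random.rand(64, 3)
feats = np.random.rand(32, 16)
print(three_interpolate(unknown, known, feats).shape)  # (32, 64)
```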


@@ -1,30 +0,0 @@
#ifndef _INTERPOLATE_GPU_H
#define _INTERPOLATE_GPU_H
#include <torch/serialize/tensor.h>
#include<vector>
#include <cuda.h>
#include <cuda_runtime_api.h>
void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor);
void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx, cudaStream_t stream);
void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor,
at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor);
void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
const float *points, const int *idx, const float *weight, float *out, cudaStream_t stream);
void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor,
at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor);
void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
const int *idx, const float *weight, float *grad_points, cudaStream_t stream);
#endif


@@ -1,24 +0,0 @@
#include <torch/serialize/tensor.h>
#include <torch/extension.h>
#include "ball_query_gpu.h"
#include "group_points_gpu.h"
#include "sampling_gpu.h"
#include "interpolate_gpu.h"
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast");
m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast");
m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast");
m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast");
m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast");
m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper");
m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast");
m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast");
m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast");
}
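
A hedged usage sketch for the compiled extension bound above, following the wrapper signatures in sampling.cpp: the caller pre-allocates the int32 output and the float temp buffer (initialized large, since the kernel keeps a running minimum in it). This assumes a successful CUDA build of the pointnet2_cuda module from setup.py.

```python
import torch
import pointnet2_cuda

B, N, M = 2, 4096, 512
points = torch.rand(B, N, 3, device="cuda")                 # (B, N, 3)
temp = torch.full((B, N), 1e10, device="cuda")              # running min distances
idx = torch.zeros(B, M, dtype=torch.int32, device="cuda")   # output indices
pointnet2_cuda.furthest_point_sampling_wrapper(B, N, M, points, temp, idx)
print(idx.shape)  # torch.Size([2, 512])
```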


@@ -1,51 +0,0 @@
#include <torch/serialize/tensor.h>
#include <ATen/cuda/CUDAContext.h>
#include <vector>
// #include <THC/THC.h>
#include "sampling_gpu.h"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
// extern THCState *state;
int gather_points_wrapper_fast(int b, int c, int n, int npoints,
at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){
const float *points = points_tensor.data<float>();
const int *idx = idx_tensor.data<int>();
float *out = out_tensor.data<float>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out, stream);
return 1;
}
int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {
const float *grad_out = grad_out_tensor.data<float>();
const int *idx = idx_tensor.data<int>();
float *grad_points = grad_points_tensor.data<float>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points, stream);
return 1;
}
int furthest_point_sampling_wrapper(int b, int n, int m,
at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {
const float *points = points_tensor.data<float>();
float *temp = temp_tensor.data<float>();
int *idx = idx_tensor.data<int>();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx, stream);
return 1;
}


@@ -1,253 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
__global__ void gather_points_kernel_fast(int b, int c, int n, int m,
const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
// points: (B, C, N)
// idx: (B, M)
// output:
// out: (B, C, M)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
out += bs_idx * c * m + c_idx * m + pt_idx;
idx += bs_idx * m + pt_idx;
points += bs_idx * c * n + c_idx * n;
out[0] = points[idx[0]];
}
void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints,
const float *points, const int *idx, float *out, cudaStream_t stream) {
// points: (B, C, N)
// idx: (B, npoints)
// output:
// out: (B, C, npoints)
cudaError_t err;
dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
gather_points_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, points, idx, out);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, float *__restrict__ grad_points) {
// grad_out: (B, C, M)
// idx: (B, M)
// output:
// grad_points: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
grad_out += bs_idx * c * m + c_idx * m + pt_idx;
idx += bs_idx * m + pt_idx;
grad_points += bs_idx * c * n + c_idx * n;
atomicAdd(grad_points + idx[0], grad_out[0]);
}
void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints,
const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) {
// grad_out: (B, C, npoints)
// idx: (B, npoints)
// output:
// grad_points: (B, C, N)
cudaError_t err;
dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
gather_points_grad_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, grad_out, idx, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(int b, int n, int m,
const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp, int *idxs, cudaStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
cudaError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_kernel<1024><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 512:
furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 256:
furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 128:
furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 64:
furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 32:
furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 16:
furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 8:
furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 4:
furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 2:
furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 1:
furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
default:
furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
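
A numpy reference for the furthest_point_sampling kernel deleted above: start from point 0, keep a running minimum distance to the selected set (the temp buffer), and repeatedly pick the point whose nearest selected point is farthest away. The CUDA version parallelizes the argmax with the shared-memory reduction in __update; the logic is otherwise the same.

```python
import numpy as np

def furthest_point_sampling(pts, m):
    # pts: (N, 3) -> (m,) indices of the sampled points
    n = len(pts)
    idxs = np.zeros(m, dtype=np.int64)   # the kernel also seeds with index 0
    temp = np.full(n, np.inf)            # min squared distance to the chosen set
    old = 0
    for j in range(1, m):
        d = ((pts - pts[old]) ** 2).sum(axis=1)
        temp = np.minimum(temp, d)
        old = int(np.argmax(temp))
        idxs[j] = old
    return idxs

pts = np.random.rand(1000, 3)
print(furthest_point_sampling(pts, 16))
```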


@@ -1,29 +0,0 @@
#ifndef _SAMPLING_GPU_H
#define _SAMPLING_GPU_H
#include <torch/serialize/tensor.h>
#include <ATen/cuda/CUDAContext.h>
#include<vector>
int gather_points_wrapper_fast(int b, int c, int n, int npoints,
at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);
void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints,
const float *points, const int *idx, float *out, cudaStream_t stream);
int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);
void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints,
const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream);
int furthest_point_sampling_wrapper(int b, int n, int m,
at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor);
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp, int *idxs, cudaStream_t stream);
#endif


@@ -1,2 +0,0 @@
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../'))


@@ -1,188 +0,0 @@
import os
import numpy as np
import torch.utils.data as torch_data
import kitti_utils
import cv2
from PIL import Image
USE_INTENSITY = False
class KittiDataset(torch_data.Dataset):
def __init__(self, root_dir, split='train', mode='TRAIN'):
self.split = split
self.mode = mode
self.classes = ['Car']
is_test = self.split == 'test'
self.imageset_dir = os.path.join(root_dir, 'KITTI', 'object', 'testing' if is_test else 'training')
split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', split + '.txt')
self.image_idx_list = [x.strip() for x in open(split_dir).readlines()]
self.sample_id_list = [int(sample_id) for sample_id in self.image_idx_list]
self.num_sample = self.image_idx_list.__len__()
self.npoints = 16384
self.image_dir = os.path.join(self.imageset_dir, 'image_2')
self.lidar_dir = os.path.join(self.imageset_dir, 'velodyne')
self.calib_dir = os.path.join(self.imageset_dir, 'calib')
self.label_dir = os.path.join(self.imageset_dir, 'label_2')
self.plane_dir = os.path.join(self.imageset_dir, 'planes')
def get_image(self, idx):
img_file = os.path.join(self.image_dir, '%06d.png' % idx)
assert os.path.exists(img_file)
return cv2.imread(img_file) # (H, W, 3) BGR mode
def get_image_shape(self, idx):
img_file = os.path.join(self.image_dir, '%06d.png' % idx)
assert os.path.exists(img_file)
im = Image.open(img_file)
width, height = im.size
return height, width, 3
def get_lidar(self, idx):
lidar_file = os.path.join(self.lidar_dir, '%06d.bin' % idx)
assert os.path.exists(lidar_file)
return np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)
def get_calib(self, idx):
calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)
assert os.path.exists(calib_file)
return kitti_utils.Calibration(calib_file)
def get_label(self, idx):
label_file = os.path.join(self.label_dir, '%06d.txt' % idx)
assert os.path.exists(label_file)
return kitti_utils.get_objects_from_label(label_file)
@staticmethod
def get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape):
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def filtrate_objects(self, obj_list):
type_whitelist = self.classes
if self.mode == 'TRAIN':
type_whitelist = list(self.classes)
if 'Car' in self.classes:
type_whitelist.append('Van')
valid_obj_list = []
for obj in obj_list:
if obj.cls_type not in type_whitelist:
continue
valid_obj_list.append(obj)
return valid_obj_list
def __len__(self):
return len(self.sample_id_list)
def __getitem__(self, index):
sample_id = int(self.sample_id_list[index])
calib = self.get_calib(sample_id)
img_shape = self.get_image_shape(sample_id)
pts_lidar = self.get_lidar(sample_id)
# get valid point (projected points should be in image)
pts_rect = calib.lidar_to_rect(pts_lidar[:, 0:3])
pts_intensity = pts_lidar[:, 3]
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
pts_valid_flag = self.get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape)
pts_rect = pts_rect[pts_valid_flag][:, 0:3]
pts_intensity = pts_intensity[pts_valid_flag]
if self.npoints < len(pts_rect):
pts_depth = pts_rect[:, 2]
pts_near_flag = pts_depth < 40.0
far_idxs_choice = np.where(pts_near_flag == 0)[0]
near_idxs = np.where(pts_near_flag == 1)[0]
near_idxs_choice = np.random.choice(near_idxs, self.npoints - len(far_idxs_choice), replace=False)
choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
if len(far_idxs_choice) > 0 else near_idxs_choice
np.random.shuffle(choice)
else:
choice = np.arange(0, len(pts_rect), dtype=np.int32)
if self.npoints > len(pts_rect):
extra_choice = np.random.choice(choice, self.npoints - len(pts_rect), replace=False)
choice = np.concatenate((choice, extra_choice), axis=0)
np.random.shuffle(choice)
ret_pts_rect = pts_rect[choice, :]
ret_pts_intensity = pts_intensity[choice] - 0.5 # translate intensity to [-0.5, 0.5]
pts_features = [ret_pts_intensity.reshape(-1, 1)]
ret_pts_features = np.concatenate(pts_features, axis=1) if pts_features.__len__() > 1 else pts_features[0]
sample_info = {'sample_id': sample_id}
if self.mode == 'TEST':
if USE_INTENSITY:
pts_input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1) # (N, C)
else:
pts_input = ret_pts_rect
sample_info['pts_input'] = pts_input
sample_info['pts_rect'] = ret_pts_rect
sample_info['pts_features'] = ret_pts_features
return sample_info
gt_obj_list = self.filtrate_objects(self.get_label(sample_id))
gt_boxes3d = kitti_utils.objs_to_boxes3d(gt_obj_list)
# prepare input
if USE_INTENSITY:
pts_input = np.concatenate((ret_pts_rect, ret_pts_features), axis=1) # (N, C)
else:
pts_input = ret_pts_rect
# generate training labels
cls_labels = self.generate_training_labels(ret_pts_rect, gt_boxes3d)
sample_info['pts_input'] = pts_input
sample_info['pts_rect'] = ret_pts_rect
sample_info['cls_labels'] = cls_labels
return sample_info
@staticmethod
def generate_training_labels(pts_rect, gt_boxes3d):
cls_label = np.zeros((pts_rect.shape[0]), dtype=np.int32)
gt_corners = kitti_utils.boxes3d_to_corners3d(gt_boxes3d, rotate=True)
extend_gt_boxes3d = kitti_utils.enlarge_box3d(gt_boxes3d, extra_width=0.2)
extend_gt_corners = kitti_utils.boxes3d_to_corners3d(extend_gt_boxes3d, rotate=True)
for k in range(gt_boxes3d.shape[0]):
box_corners = gt_corners[k]
fg_pt_flag = kitti_utils.in_hull(pts_rect, box_corners)
cls_label[fg_pt_flag] = 1
# enlarge the bbox3d, ignore nearby points
extend_box_corners = extend_gt_corners[k]
fg_enlarge_flag = kitti_utils.in_hull(pts_rect, extend_box_corners)
ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)
cls_label[ignore_flag] = -1
return cls_label
def collate_batch(self, batch):
batch_size = batch.__len__()
ans_dict = {}
for key in batch[0].keys():
if isinstance(batch[0][key], np.ndarray):
ans_dict[key] = np.concatenate([batch[k][key][np.newaxis, ...] for k in range(batch_size)], axis=0)
else:
ans_dict[key] = [batch[k][key] for k in range(batch_size)]
if isinstance(batch[0][key], int):
ans_dict[key] = np.array(ans_dict[key], dtype=np.int32)
elif isinstance(batch[0][key], float):
ans_dict[key] = np.array(ans_dict[key], dtype=np.float32)
return ans_dict
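A minimal usage sketch of the collate function above (hedged: it assumes this dataset class lives in dataset.py as KittiDataset, matching the training script further down):

from torch.utils.data import DataLoader
from dataset import KittiDataset

train_set = KittiDataset(root_dir='./data', mode='TRAIN', split='train')
# np.ndarray fields are stacked to (B, ...); int/float fields become typed arrays
train_loader = DataLoader(train_set, batch_size=8, shuffle=True,
                          collate_fn=train_set.collate_batch)
batch = next(iter(train_loader))
print(batch['pts_input'].shape, batch['cls_labels'].shape)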


@@ -1,229 +0,0 @@
import numpy as np
from scipy.spatial import Delaunay
import scipy
def cls_type_to_id(cls_type):
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
def __init__(self, line):
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10])
self.pos = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.pos)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level_str = None
self.level = self.get_obj_level()
def get_obj_level(self):
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 1 # Easy
elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 2 # Moderate
elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 3 # Hard
else:
self.level_str = 'UnKnown'
return 4
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.pos
return corners3d
def to_str(self):
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
self.pos, self.ry)
return print_str
def to_kitti_format(self):
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.pos[0], self.pos[1], self.pos[2],
self.ry)
return kitti_str
def get_calib_from_file(calib_file):
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
def __init__(self, calib_file):
if isinstance(calib_file, str):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def get_objects_from_label(label_file):
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def objs_to_boxes3d(obj_list):
boxes3d = np.zeros((obj_list.__len__(), 7), dtype=np.float32)
for k, obj in enumerate(obj_list):
boxes3d[k, 0:3], boxes3d[k, 3], boxes3d[k, 4], boxes3d[k, 5], boxes3d[k, 6] \
= obj.pos, obj.h, obj.w, obj.l, obj.ry
return boxes3d
def boxes3d_to_corners3d(boxes3d, rotate=True):
"""
:param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
:param rotate:
:return: corners3d: (N, 8, 3)
"""
boxes_num = boxes3d.shape[0]
h, w, l = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2.], dtype=np.float32).T # (N, 8)
z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T # (N, 8)
y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1) # (N, 8)
if rotate:
ry = boxes3d[:, 6]
zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
[zeros, ones, zeros],
[np.sin(ry), zeros, np.cos(ry)]]) # (3, 3, N)
R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3)
temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
z_corners.reshape(-1, 8, 1)), axis=2) # (N, 8, 3)
rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3)
x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]
x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]
x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)
corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)
return corners.astype(np.float32)
def enlarge_box3d(boxes3d, extra_width):
"""
:param boxes3d: (N, 7) [x, y, z, h, w, l, ry]
"""
if isinstance(boxes3d, np.ndarray):
large_boxes3d = boxes3d.copy()
else:
large_boxes3d = boxes3d.clone()
large_boxes3d[:, 3:6] += extra_width * 2
large_boxes3d[:, 1] += extra_width
return large_boxes3d
def in_hull(p, hull):
"""
:param p: (N, K) test points
:param hull: (M, K) M corners of a box
:return (N) bool
"""
try:
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
flag = hull.find_simplex(p) >= 0
except scipy.spatial.QhullError:
print('Warning: not a hull %s' % str(hull))
flag = np.zeros(p.shape[0], dtype=bool)
return flag
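A quick sanity check of boxes3d_to_corners3d and in_hull together (a sketch; it assumes this file is importable as kitti_utils):

import numpy as np
import kitti_utils

# a single box [x, y, z, h, w, l, ry] at the origin, KITTI camera coords (y points down)
boxes3d = np.array([[0., 0., 0., 2., 1., 4., 0.]], dtype=np.float32)
corners = kitti_utils.boxes3d_to_corners3d(boxes3d)  # (1, 8, 3)
pts = np.array([[0., -1., 0.],   # middle of the box volume -> inside
                [10., 0., 0.]])  # far away -> outside
print(kitti_utils.in_hull(pts, corners[0]))  # [ True False]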


@@ -1,102 +0,0 @@
import torch
import torch.nn as nn
import sys
sys.path.append('..')
from pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG
import pointnet2.pytorch_utils as pt_utils
def get_model(input_channels=0):
return Pointnet2MSG(input_channels=input_channels)
NPOINTS = [4096, 1024, 256, 64]
RADIUS = [[0.1, 0.5], [0.5, 1.0], [1.0, 2.0], [2.0, 4.0]]
NSAMPLE = [[16, 32], [16, 32], [16, 32], [16, 32]]
MLPS = [[[16, 16, 32], [32, 32, 64]], [[64, 64, 128], [64, 96, 128]],
[[128, 196, 256], [128, 196, 256]], [[256, 256, 512], [256, 384, 512]]]
FP_MLPS = [[128, 128], [256, 256], [512, 512], [512, 512]]
CLS_FC = [128]
DP_RATIO = 0.5
class Pointnet2MSG(nn.Module):
def __init__(self, input_channels=6):
super().__init__()
self.SA_modules = nn.ModuleList()
channel_in = input_channels
skip_channel_list = [input_channels]
for k in range(NPOINTS.__len__()):
mlps = MLPS[k].copy()
channel_out = 0
for idx in range(mlps.__len__()):
mlps[idx] = [channel_in] + mlps[idx]
channel_out += mlps[idx][-1]
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=NPOINTS[k],
radii=RADIUS[k],
nsamples=NSAMPLE[k],
mlps=mlps,
use_xyz=True,
bn=True
)
)
skip_channel_list.append(channel_out)
channel_in = channel_out
self.FP_modules = nn.ModuleList()
for k in range(FP_MLPS.__len__()):
pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
self.FP_modules.append(
PointnetFPModule(mlp=[pre_channel + skip_channel_list[k]] + FP_MLPS[k])
)
cls_layers = []
pre_channel = FP_MLPS[0][-1]
for k in range(0, CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, CLS_FC[k], bn=True))
pre_channel = CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, 1, activation=None))
cls_layers.insert(1, nn.Dropout(0.5))
self.cls_layer = nn.Sequential(*cls_layers)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor):
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
pred_cls = self.cls_layer(l_features[0]).transpose(1, 2).contiguous() # (B, N, 1)
return pred_cls
if __name__ == '__main__':
net = Pointnet2MSG(0).cuda()
pts = torch.randn(2, 1024, 3).cuda()
pre = net(pts)
print(pre.shape)


@@ -1,217 +0,0 @@
import _init_path
import numpy as np
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
import tensorboard_logger as tb_log
from dataset import KittiDataset
import argparse
import importlib
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--ckpt_save_interval", type=int, default=5)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument("--mode", type=str, default='train')
parser.add_argument("--ckpt", type=str, default='None')
parser.add_argument("--net", type=str, default='pointnet2_msg')
parser.add_argument('--lr', type=float, default=0.002)
parser.add_argument('--lr_decay', type=float, default=0.2)
parser.add_argument('--lr_clip', type=float, default=0.000001)
parser.add_argument('--decay_step_list', type=list, default=[50, 70, 80, 90])
parser.add_argument('--weight_decay', type=float, default=0.001)
parser.add_argument("--output_dir", type=str, default='output')
parser.add_argument("--extra_tag", type=str, default='default')
args = parser.parse_args()
FG_THRESH = 0.3
def log_print(info, log_f=None):
print(info)
if log_f is not None:
print(info, file=log_f)
class DiceLoss(nn.Module):
def __init__(self, ignore_target=-1):
super().__init__()
self.ignore_target = ignore_target
def forward(self, input, target):
"""
:param input: (N), logit
:param target: (N), {0, 1}
:return:
"""
input = torch.sigmoid(input.view(-1))
target = target.float().view(-1)
mask = (target != self.ignore_target).float()
return 1.0 - (torch.min(input, target) * mask).sum() / torch.clamp((torch.max(input, target) * mask).sum(), min=1.0)
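The loss above is a soft-IoU (Dice-style) objective: after the sigmoid, the elementwise min plays the role of the intersection and the elementwise max the union, with ignored points masked out. A toy check (a sketch, not part of the original script):

import torch

loss_fn = DiceLoss(ignore_target=-1)
logits = torch.tensor([5.0, -5.0, 0.0])  # confident fg, confident bg, uncertain
target = torch.tensor([1, 0, -1])        # the last point is ignored
print(loss_fn(logits, target).item())    # ~0.014, i.e. predictions match the labels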
def train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f):
model.train()
log_print('===============TRAIN EPOCH %d================' % epoch, log_f=log_f)
loss_func = DiceLoss(ignore_target=-1)
for it, batch in enumerate(train_loader):
optimizer.zero_grad()
pts_input, cls_labels = batch['pts_input'], batch['cls_labels']
pts_input = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
cls_labels = torch.from_numpy(cls_labels).cuda(non_blocking=True).long().view(-1)
pred_cls = model(pts_input)
pred_cls = pred_cls.view(-1)
loss = loss_func(pred_cls, cls_labels)
loss.backward()
clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
total_it += 1
pred_class = (torch.sigmoid(pred_cls) > FG_THRESH)
fg_mask = cls_labels > 0
correct = ((pred_class.long() == cls_labels) & fg_mask).float().sum()
union = fg_mask.sum().float() + (pred_class > 0).sum().float() - correct
iou = correct / torch.clamp(union, min=1.0)
cur_lr = lr_scheduler.get_lr()[0]
if tb_log is not None:
tb_log.log_value('learning_rate', cur_lr, epoch)
tb_log.log_value('train_loss', loss, total_it)
tb_log.log_value('train_fg_iou', iou, total_it)
log_print('training epoch %d: it=%d/%d, total_it=%d, loss=%.5f, fg_iou=%.3f, lr=%f' %
(epoch, it, len(train_loader), total_it, loss.item(), iou.item(), cur_lr), log_f=log_f)
return total_it
def eval_one_epoch(model, eval_loader, epoch, tb_log=None, log_f=None):
model.eval()
log_print('===============EVAL EPOCH %d================' % epoch, log_f=log_f)
iou_list = []
for it, batch in enumerate(eval_loader):
pts_input, cls_labels = batch['pts_input'], batch['cls_labels']
pts_input = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
cls_labels = torch.from_numpy(cls_labels).cuda(non_blocking=True).long().view(-1)
pred_cls = model(pts_input)
pred_cls = pred_cls.view(-1)
pred_class = (torch.sigmoid(pred_cls) > FG_THRESH)
fg_mask = cls_labels > 0
correct = ((pred_class.long() == cls_labels) & fg_mask).float().sum()
union = fg_mask.sum().float() + (pred_class > 0).sum().float() - correct
iou = correct / torch.clamp(union, min=1.0)
iou_list.append(iou.item())
log_print('EVAL: it=%d/%d, iou=%.3f' % (it, len(eval_loader), iou), log_f=log_f)
iou_list = np.array(iou_list)
avg_iou = iou_list.mean()
if tb_log is not None:
tb_log.log_value('eval_fg_iou', avg_iou, epoch)
log_print('\nEpoch %d: Average IoU (samples=%d): %.6f' % (epoch, iou_list.__len__(), avg_iou), log_f=log_f)
return avg_iou
def save_checkpoint(model, epoch, ckpt_name):
if isinstance(model, torch.nn.DataParallel):
model_state = model.module.state_dict()
else:
model_state = model.state_dict()
state = {'epoch': epoch, 'model_state': model_state}
ckpt_name = '{}.pth'.format(ckpt_name)
torch.save(state, ckpt_name)
def load_checkpoint(model, filename):
if os.path.isfile(filename):
log_print("==> Loading from checkpoint %s" % filename)
checkpoint = torch.load(filename)
epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model_state'])
log_print("==> Done")
else:
raise FileNotFoundError
return epoch
def train_and_eval(model, train_loader, eval_loader, tb_log, ckpt_dir, log_f):
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
def lr_lbmd(cur_epoch):
cur_decay = 1
for decay_step in args.decay_step_list:
if cur_epoch >= decay_step:
cur_decay = cur_decay * args.lr_decay
return max(cur_decay, args.lr_clip / args.lr)
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
total_it = 0
for epoch in range(1, args.epochs + 1):
lr_scheduler.step(epoch)
total_it = train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f)
if epoch % args.ckpt_save_interval == 0:
with torch.no_grad():
avg_iou = eval_one_epoch(model, eval_loader, epoch, tb_log, log_f)
ckpt_name = os.path.join(ckpt_dir, 'checkpoint_epoch_%d' % epoch)
save_checkpoint(model, epoch, ckpt_name)
if __name__ == '__main__':
MODEL = importlib.import_module(args.net) # import network module
model = MODEL.get_model(input_channels=0)
eval_set = KittiDataset(root_dir='./data', mode='EVAL', split='val')
eval_loader = DataLoader(eval_set, batch_size=args.batch_size, shuffle=False, pin_memory=True,
num_workers=args.workers, collate_fn=eval_set.collate_batch)
if args.mode == 'train':
train_set = KittiDataset(root_dir='./data', mode='TRAIN', split='train')
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, pin_memory=True,
num_workers=args.workers, collate_fn=train_set.collate_batch)
# output dir config
output_dir = os.path.join(args.output_dir, args.extra_tag)
os.makedirs(output_dir, exist_ok=True)
tb_log.configure(os.path.join(output_dir, 'tensorboard'))
ckpt_dir = os.path.join(output_dir, 'ckpt')
os.makedirs(ckpt_dir, exist_ok=True)
log_file = os.path.join(output_dir, 'log.txt')
log_f = open(log_file, 'w')
for key, val in vars(args).items():
log_print("{:16} {}".format(key, val), log_f=log_f)
# train and eval
train_and_eval(model, train_loader, eval_loader, tb_log, ckpt_dir, log_f)
log_f.close()
elif args.mode == 'eval':
epoch = load_checkpoint(model, args.ckpt)
model.cuda()
with torch.no_grad():
avg_iou = eval_one_epoch(model, eval_loader, epoch)
else:
raise NotImplementedError
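The LambdaLR above multiplies the base learning rate by lr_decay at every milestone in decay_step_list and clips the result from below at lr_clip; a hedged sketch of the resulting schedule under the default arguments:

# defaults: lr=0.002, lr_decay=0.2, lr_clip=1e-6, decay_step_list=[50, 70, 80, 90]
lr, lr_decay, lr_clip = 0.002, 0.2, 0.000001
decay_step_list = [50, 70, 80, 90]

def lr_at(epoch):
    cur_decay = 1
    for decay_step in decay_step_list:
        if epoch >= decay_step:
            cur_decay *= lr_decay
    return lr * max(cur_decay, lr_clip / lr)

for e in (1, 50, 70, 90, 100):
    print(e, lr_at(e))  # 0.002, 4e-4, 8e-5, 3.2e-6, 3.2e-6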


@@ -5,9 +5,11 @@ import sys
path = os.path.abspath(__file__)
for i in range(2):
path = os.path.dirname(path)
PROJECT_ROOT = path
PROJECT_ROOT = path
sys.path.append(PROJECT_ROOT)
from modules.module_lib.pointnet2_utils.pointnet2.pointnet2_modules import PointnetSAModuleMSG
import PytorchBoot.stereotype as stereotype
from modules.module_lib.pointnet2_modules import PointnetSAModuleMSG
ClsMSG_CFG_Dense = {
'NPOINTS': [512, 256, 128, None],
@@ -31,6 +33,30 @@ ClsMSG_CFG_Light = {
'DP_RATIO': 0.5,
}
ClsMSG_CFG_Light_2048 = {
'NPOINTS': [512, 256, 128, None],
'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16], [None, None]],
'NSAMPLE': [[16, 32], [16, 32], [16, 32], [None, None]],
'MLPS': [[[16, 16, 32], [32, 32, 64]],
[[64, 64, 128], [64, 96, 128]],
[[128, 196, 256], [128, 196, 256]],
[[256, 256, 1024], [256, 512, 1024]]],
'DP_RATIO': 0.5,
}
ClsMSG_CFG_Strong = {
'NPOINTS': [512, 256, 128, 64, None],
'RADIUS': [[0.02, 0.04], [0.04, 0.08], [0.08, 0.16],[0.16, 0.32], [None, None]],
'NSAMPLE': [[16, 32], [16, 32], [16, 32], [16, 32], [None, None]],
'MLPS': [[[16, 16, 32], [32, 32, 64]],
[[64, 64, 128], [64, 96, 128]],
[[128, 196, 256], [128, 196, 256]],
[[256, 256, 512], [256, 512, 512]],
[[512, 512, 2048], [512, 1024, 2048]]
],
'DP_RATIO': 0.5,
}
ClsMSG_CFG_Lighter = {
'NPOINTS': [512, 256, 128, 64, None],
'RADIUS': [[0.01], [0.02], [0.04], [0.08], [None]],
@@ -51,6 +77,10 @@ def select_params(name):
return ClsMSG_CFG_Lighter
elif name == 'dense':
return ClsMSG_CFG_Dense
elif name == 'light_2048':
return ClsMSG_CFG_Light_2048
elif name == 'strong':
return ClsMSG_CFG_Strong
else:
raise NotImplementedError
@@ -65,18 +95,18 @@ def break_up_pc(pc):
return xyz, features
@stereotype.module("pointnet++_encoder")
class PointNet2Encoder(nn.Module):
def encode_points(self, pts):
def encode_points(self, pts, require_per_point_feat=False):
return self.forward(pts)
def __init__(self, config:dict):
super().__init__()
input_channels = config.get("in_dim", 0)
channel_in = config.get("in_dim", 3) - 3
params_name = config.get("params_name", "light")
self.SA_modules = nn.ModuleList()
channel_in = input_channels
selected_params = select_params(params_name)
for k in range(selected_params['NPOINTS'].__len__()):
mlps = selected_params['MLPS'][k].copy()
@@ -112,8 +142,8 @@ if __name__ == '__main__':
seed = 100
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
net = PointNet2Encoder(config={"in_dim": 0, "params_name": "light"}).cuda()
pts = torch.randn(2, 1024, 3).cuda()
net = PointNet2Encoder(config={"in_dim": 3, "params_name": "strong"}).cuda()
pts = torch.randn(2, 2444, 3).cuda()
print(torch.mean(pts, dim=1))
pre = net.encode_points(pts)
print(pre.shape)


@@ -164,7 +164,7 @@ def save_scene_data(root, scene, scene_idx=0, scene_total=1,file_type="txt"):
if __name__ == "__main__":
#root = "/media/hofee/repository/new_data_with_normal"
root = r"/media/hofee/data/results/ycb_view_data"
root = r"/media/hofee/data/data/test_bottle/view"
scene_list = os.listdir(root)
from_idx = 0 # 1000
to_idx = len(scene_list) # 1500


@@ -12,6 +12,7 @@ from PytorchBoot.runners.runner import Runner
from PytorchBoot.utils import Log
from utils.pts import PtsUtil
from beans.predict_result import PredictResult
@stereotype.runner("inferencer_server")
class InferencerServer(Runner):
@@ -50,6 +51,7 @@ class InferencerServer(Runner):
def get_result(self, output_data):
pred_pose_9d = output_data["pred_pose_9d"]
pred_pose_9d = np.asarray(PredictResult(pred_pose_9d.cpu().numpy(), None, cluster_params=dict(eps=0.25, min_samples=3)).candidate_9d_poses, dtype=np.float32)
result = {
"pred_pose_9d": pred_pose_9d.tolist()
}


@@ -4,6 +4,7 @@ from utils.render import RenderUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil
from utils.reconstruction import ReconstructionUtil
from beans.predict_result import PredictResult
import torch
from tqdm import tqdm
@@ -82,6 +83,7 @@ class Inferencer(Runner):
data = test_set.__getitem__(i)
scene_name = data["scene_name"]
inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
if os.path.exists(inference_result_path):
Log.info(f"Inference result already exists for scene: {scene_name}")
continue
@@ -135,81 +137,98 @@ class Inferencer(Runner):
pred_cr_seq = [last_pred_cr]
success = 0
last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], voxel_threshold).shape[0]
import time
#import time
while len(pred_cr_seq) < max_iter and retry < max_retry and success < max_success:
Log.green(f"iter: {len(pred_cr_seq)}, retry: {retry}/{max_retry}, success: {success}/{max_success}")
combined_scanned_pts = np.vstack(scanned_view_pts)
voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
output = self.pipeline(input_data)
pred_pose_9d = output["pred_pose_9d"]
import ipdb; ipdb.set_trace()
pred_pose = torch.eye(4, device=pred_pose_9d.device)
pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9d[:,:6])[0]
pred_pose[:3,3] = pred_pose_9d[0,6:]
try:
new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
# # save pred_pose_9d ------
# root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction/temp_output_result"
# scene_dir = os.path.join(root, scene_name)
# if not os.path.exists(scene_dir):
# os.makedirs(scene_dir)
# pred_9d_path = os.path.join(scene_dir,f"pred_pose_9d_{len(pred_cr_seq)}.npy")
# pts_path = os.path.join(scene_dir,f"combined_scanned_pts_{len(pred_cr_seq)}.txt")
# np_combined_scanned_pts = input_data["combined_scanned_pts"][0].cpu().numpy()
# np.save(pred_9d_path, pred_pose_9d.cpu().numpy())
# np.savetxt(pts_path, np_combined_scanned_pts)
# # ----- ----- -----
predict_result = PredictResult(pred_pose_9d.cpu().numpy(), input_pts=input_data["combined_scanned_pts"][0].cpu().numpy(), cluster_params=dict(eps=0.25, min_samples=3))
# -----------------------
# import ipdb; ipdb.set_trace()
# predict_result.visualize()
# -----------------------
pred_pose_9d_candidates = predict_result.candidate_9d_poses
for pred_pose_9d in pred_pose_9d_candidates:
#import ipdb; ipdb.set_trace()
if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
curr_overlap_area_threshold = overlap_area_threshold
else:
curr_overlap_area_threshold = overlap_area_threshold * 0.5
pred_pose_9d = torch.tensor(pred_pose_9d, dtype=torch.float32).to(self.device).unsqueeze(0)
pred_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9d[:,:6])[0]
pred_pose[:3,3] = pred_pose_9d[0,6:]
try:
new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
#import ipdb; ipdb.set_trace()
if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
curr_overlap_area_threshold = overlap_area_threshold
else:
curr_overlap_area_threshold = overlap_area_threshold * 0.5
downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
if not overlap:
Log.yellow("no overlap!")
downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, voxel_downsampled_combined_scanned_pts_np, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
if not overlap:
Log.yellow("no overlap!")
retry += 1
retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
continue
history_indices.append(new_scan_points_indices)
except Exception as e:
Log.error(f"Error in scene {scene_path}, {e}")
print("current pose: ", pred_pose)
print("curr_pred_cr: ", last_pred_cr)
retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
retry += 1
retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
continue
history_indices.append(new_scan_points_indices)
except Exception as e:
Log.error(f"Error in scene {scene_path}, {e}")
print("current pose: ", pred_pose)
print("curr_pred_cr: ", last_pred_cr)
retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
retry += 1
continue
if new_target_pts.shape[0] == 0:
Log.red("no pts in new target")
retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
retry += 1
continue
pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
Log.yellow(f"{pred_cr}, {last_pred_cr}, max: , {data['seq_max_coverage_rate']}")
if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
print("max coverage rate reached!: ", pred_cr)
if new_target_pts.shape[0] == 0:
Log.red("no pts in new target")
retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
retry += 1
continue
pred_cr_seq.append(pred_cr)
scanned_view_pts.append(new_target_pts)
pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
Log.yellow(f"{pred_cr}, {last_pred_cr}, max: , {data['seq_max_coverage_rate']}")
if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
print("max coverage rate reached!: ", pred_cr)
input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)]
combined_scanned_pts = np.vstack(scanned_view_pts)
voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, voxel_threshold)
random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)
last_pred_cr = pred_cr
pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
Log.info(f"delta pts num:,{pts_num - last_pts_num },{pts_num}, {last_pts_num}")
pred_cr_seq.append(pred_cr)
scanned_view_pts.append(new_target_pts)
input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)]
combined_scanned_pts = np.vstack(scanned_view_pts)
voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, voxel_threshold)
random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)
if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
retry += 1
retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
Log.red(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2:
success += 1
Log.success(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
last_pred_cr = pred_cr
pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
Log.info(f"delta pts num:,{pts_num - last_pts_num },{pts_num}, {last_pts_num}")
last_pts_num = pts_num
if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
retry += 1
retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
Log.red(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2:
success += 1
Log.success(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
last_pts_num = pts_num
input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()

runners/simulator.py Normal file

@@ -0,0 +1,456 @@
import pybullet as p
import pybullet_data
import numpy as np
import os
import time
from PytorchBoot.runners.runner import Runner
import PytorchBoot.stereotype as stereotype
from PytorchBoot.config import ConfigManager
from utils.control import ControlUtil
@stereotype.runner("simulator")
class Simulator(Runner):
CREATE: str = "create"
SIMULATE: str = "simulate"
INIT_GRIPPER_POSE: np.ndarray = np.asarray(
[[0.41869126, 0.87596275, 0.23951774, 0.36005292],
[0.70787907, -0.4800251, 0.51813998, -0.40499909],
[0.56884584, -0.04739109, -0.82107382, 0.76881103],
[0., 0., 0., 1.]])
TURNTABLE_WORLD_TO_PYBULLET_WORLD: np.ndarray = np.asarray(
[[1, 0, 0, 0.8],
[0, 1, 0, 0],
[0, 0, 1, 0.5],
[0, 0, 0, 1]])
debug_pose = np.asarray([
[0.992167055606842, -0.10552699863910675, 0.06684812903404236, -0.07388903945684433],
[0.10134342312812805, 0.3670985698699951, -0.9246448874473572, -0.41582486033439636],
[0.07303514331579208, 0.9241767525672913, 0.37491756677627563, 1.0754833221435547],
[0.0, 0.0, 0.0, 1.0]])
def __init__(self, config_path):
super().__init__(config_path)
self.config_path = config_path
self.robot_id = None
self.turntable_id = None
self.target_id = None
camera_config = ConfigManager.get("simulation", "camera")
self.camera_params = {
'width': camera_config["width"],
'height': camera_config["height"],
'fov': camera_config["fov"],
'near': camera_config["near"],
'far': camera_config["far"]
}
self.sim_config = ConfigManager.get("simulation")
def run(self, cmd):
print(f"Simulator run {cmd}")
if cmd == self.CREATE:
self.prepare_env()
self.create_env()
elif cmd == self.SIMULATE:
self.simulate()
def simulate(self):
self.reset()
self.init()
debug_pose = Simulator.debug_pose
offset = np.asarray([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
debug_pose = debug_pose @ offset
for _ in range(10000):
debug_pose_2 = np.eye(4)
debug_pose_2[0,0] = -1
debug_pose_2[2,3] = 0.5
self.move_to(debug_pose_2)
# Wait for the system to stabilize
for _ in range(20): # Simulate 20 steps to ensure stability
p.stepSimulation()
time.sleep(0.001) # Add small delay to ensure physics simulation
depth_img, segm_img = self.take_picture()
p.stepSimulation()
def prepare_env(self):
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, 0)
p.loadURDF("plane.urdf")
def create_env(self):
print(self.config)
robot_config = self.sim_config["robot"]
turntable_config = self.sim_config["turntable"]
target_config = self.sim_config["target"]
self.robot_id = p.loadURDF(
robot_config["urdf_path"],
robot_config["initial_position"],
p.getQuaternionFromEuler(robot_config["initial_orientation"]),
useFixedBase=True
)
p.changeDynamics(
self.robot_id,
linkIndex=-1,
mass=0,
linearDamping=0,
angularDamping=0,
lateralFriction=0
)
visual_shape_id = p.createVisualShape(
shapeType=p.GEOM_CYLINDER,
radius=turntable_config["radius"],
length=turntable_config["height"],
rgbaColor=[0.7, 0.7, 0.7, 1]
)
collision_shape_id = p.createCollisionShape(
shapeType=p.GEOM_CYLINDER,
radius=turntable_config["radius"],
height=turntable_config["height"]
)
self.turntable_id = p.createMultiBody(
baseMass=0, # mass 0 makes the body static
baseCollisionShapeIndex=collision_shape_id,
baseVisualShapeIndex=visual_shape_id,
basePosition=turntable_config["center_position"]
)
# disable the turntable's dynamics
p.changeDynamics(
self.turntable_id,
-1, # -1 refers to the base link
mass=0,
linearDamping=0,
angularDamping=0,
lateralFriction=0
)
obj_path = os.path.join(target_config["obj_dir"], target_config["obj_name"], "mesh.obj")
assert os.path.exists(obj_path), f"Error: File not found at {obj_path}"
# load the OBJ file as the target object
target_visual = p.createVisualShape(
shapeType=p.GEOM_MESH,
fileName=obj_path,
rgbaColor=target_config["rgba_color"],
specularColor=[0.4, 0.4, 0.4],
meshScale=[target_config["scale"]] * 3
)
# build the collision shape from the same mesh
target_collision = p.createCollisionShape(
shapeType=p.GEOM_MESH,
fileName=obj_path,
meshScale=[target_config["scale"]] * 3,
flags=p.GEOM_FORCE_CONCAVE_TRIMESH # force concave triangle-mesh collision
)
# create the target object
self.target_id = p.createMultiBody(
baseMass=0, # mass 0 makes the body static
baseCollisionShapeIndex=target_collision,
baseVisualShapeIndex=target_visual,
basePosition=[
turntable_config["center_position"][0],
turntable_config["center_position"][1],
turntable_config["height"] + turntable_config["center_position"][2]
],
baseOrientation=p.getQuaternionFromEuler([np.pi/2, 0, 0])
)
# disable the target object's dynamics
p.changeDynamics(
self.target_id,
-1, # -1 refers to the base link
mass=0,
linearDamping=0,
angularDamping=0,
lateralFriction=0
)
# create a fixed constraint attaching the target object to the turntable
cid = p.createConstraint(
parentBodyUniqueId=self.turntable_id,
parentLinkIndex=-1, # -1 refers to the base link
childBodyUniqueId=self.target_id,
childLinkIndex=-1, # -1 refers to the base link
jointType=p.JOINT_FIXED,
jointAxis=[0, 0, 0],
parentFramePosition=[0, 0, 0], # offset relative to the turntable center
childFramePosition=[0, 0, 0] # offset relative to the object center
)
# set constraint parameters
p.changeConstraint(cid, maxForce=100) # use a max force so the constraint stays stable
def move_robot_to_pose(self, target_matrix):
# extract the position (first three elements of the last column) from the 4x4 homogeneous matrix
position = target_matrix[:3, 3]
# derive the orientation quaternion from the 3x3 rotation matrix
R = target_matrix[:3, :3]
# compute the w component of the quaternion
w = np.sqrt(max(0, 1 + R[0,0] + R[1,1] + R[2,2])) / 2
# avoid division by zero and handle the degenerate branches
if abs(w) < 1e-8:
# special case when w is close to zero
x = np.sqrt(max(0, 1 + R[0,0] - R[1,1] - R[2,2])) / 2
y = np.sqrt(max(0, 1 - R[0,0] + R[1,1] - R[2,2])) / 2
z = np.sqrt(max(0, 1 - R[0,0] - R[1,1] + R[2,2])) / 2
# fix the signs
if R[2,1] - R[1,2] < 0: x = -x
if R[0,2] - R[2,0] < 0: y = -y
if R[1,0] - R[0,1] < 0: z = -z
else:
# regular case
x = (R[2,1] - R[1,2]) / (4 * w)
y = (R[0,2] - R[2,0]) / (4 * w)
z = (R[1,0] - R[0,1]) / (4 * w)
orientation = (x, y, z, w)
# set up the IK solver parameters
num_joints = p.getNumJoints(self.robot_id)
lower_limits = []
upper_limits = []
joint_ranges = []
rest_poses = []
# collect joint limits and a default rest pose
for i in range(num_joints):
joint_info = p.getJointInfo(self.robot_id, i)
lower_limits.append(joint_info[8])
upper_limits.append(joint_info[9])
joint_ranges.append(joint_info[9] - joint_info[8])
rest_poses.append(0) # a better default rest pose could be supplied here
# solve IK subject to joint limits and rest poses (collisions are checked separately during the stepped motion below)
joint_poses = p.calculateInverseKinematics(
self.robot_id,
7, # end effector link index
position,
orientation,
lowerLimits=lower_limits,
upperLimits=upper_limits,
jointRanges=joint_ranges,
restPoses=rest_poses,
maxNumIterations=100,
residualThreshold=1e-4
)
# move toward the target step by step while checking for collisions
current_poses = [p.getJointState(self.robot_id, i)[0] for i in range(7)]
steps = 50 # move in 50 interpolation steps
for step in range(steps):
# linearly interpolate the intermediate joint positions
intermediate_poses = []
for current, target in zip(current_poses, joint_poses):
t = (step + 1) / steps
intermediate = current + (target - current) * t
intermediate_poses.append(intermediate)
# command the joint positions
for i in range(7):
p.setJointMotorControl2(
self.robot_id,
i,
p.POSITION_CONTROL,
intermediate_poses[i]
)
# advance the simulation by one step
p.stepSimulation()
# check for collisions with the turntable
if p.getContactPoints(self.robot_id, self.turntable_id):
print("检测到潜在碰撞,停止移动")
return False
return True
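The hand-rolled matrix-to-quaternion branch above can be cross-checked with scipy, which control.py (below) already depends on; note that scipy's as_quat() returns (x, y, z, w), the same ordering pybullet expects (a sketch):

import numpy as np
from scipy.spatial.transform import Rotation as R

target_matrix = np.eye(4)
x, y, z, w = R.from_matrix(target_matrix[:3, :3]).as_quat()  # (x, y, z, w)
print((x, y, z, w))  # (0, 0, 0, 1) for the identity rotation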
def rotate_turntable(self, angle_degrees):
# rotate the turntable
current_pos, current_orn = p.getBasePositionAndOrientation(self.turntable_id)
current_orn = p.getEulerFromQuaternion(current_orn)
new_orn = list(current_orn)
new_orn[2] += np.radians(angle_degrees)
new_orn_quat = p.getQuaternionFromEuler(new_orn)
p.resetBasePositionAndOrientation(
self.turntable_id,
current_pos,
new_orn_quat
)
# rotate the target object together with the turntable
target_pos, target_orn = p.getBasePositionAndOrientation(self.target_id)
target_orn = p.getEulerFromQuaternion(target_orn)
# update the target object's orientation
target_orn = list(target_orn)
target_orn[2] += np.radians(angle_degrees)
target_orn_quat = p.getQuaternionFromEuler(target_orn)
# compute the object's new position (rotating about the turntable center)
turntable_center = current_pos
relative_pos = np.array(target_pos) - np.array(turntable_center)
# build the rotation matrix
theta = np.radians(angle_degrees)
rotation_matrix = np.array([
[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]
])
# compute the new relative position
new_relative_pos = rotation_matrix.dot(relative_pos)
new_pos = np.array(turntable_center) + new_relative_pos
# update the target object's position and orientation
p.resetBasePositionAndOrientation(
self.target_id,
new_pos,
target_orn_quat
)
def get_camera_pose(self):
end_effector_link = 7 # link index of the Franka end effector
state = p.getLinkState(self.robot_id, end_effector_link)
ee_pos = state[0] # position in the world frame
camera_orn = state[1] # orientation in the world frame (quaternion)
# build the camera pose from the end-effector orientation
rot_matrix = p.getMatrixFromQuaternion(camera_orn)
rot_matrix = np.array(rot_matrix).reshape(3, 3)
# the camera's forward vector is aligned with the end effector's z-axis
camera_forward = rot_matrix.dot(np.array([0, 0, 1])) # z-axis direction
# offset the camera position 0.12 m forward along the view axis
offset = 0.12
camera_pos = np.array(ee_pos) + camera_forward * offset
camera_target = camera_pos + camera_forward
# the camera's up vector is aligned with the end effector's x-axis
camera_up = rot_matrix.dot(np.array([1, 0, 0])) # x-axis direction
return camera_pos, camera_target, camera_up
def take_picture(self):
camera_pos, camera_target, camera_up = self.get_camera_pose()
view_matrix = p.computeViewMatrix(
cameraEyePosition=camera_pos,
cameraTargetPosition=camera_target,
cameraUpVector=camera_up
)
projection_matrix = p.computeProjectionMatrixFOV(
fov=self.camera_params['fov'],
aspect=self.camera_params['width'] / self.camera_params['height'],
nearVal=self.camera_params['near'],
farVal=self.camera_params['far']
)
_,_,rgb_img,depth_img,segm_img = p.getCameraImage(
width=self.camera_params['width'],
height=self.camera_params['height'],
viewMatrix=view_matrix,
projectionMatrix=projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL
)
depth_img = self.camera_params['far'] * self.camera_params['near'] / (
self.camera_params['far'] - (self.camera_params['far'] - self.camera_params['near']) * depth_img)
depth_img = np.array(depth_img)
segm_img = np.array(segm_img)
return depth_img, segm_img
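take_picture already linearizes the nonlinear OpenGL depth buffer into metric depth via z = far * near / (far - (far - near) * d). A hedged sketch of back-projecting that depth image into camera-frame points, assuming a pinhole model with pybullet's vertical fov:

import numpy as np

def depth_to_points(depth_img, fov_deg, width, height):
    # focal length in pixels from the vertical field of view
    f = height / (2.0 * np.tan(np.radians(fov_deg) / 2.0))
    cx, cy = width / 2.0, height / 2.0
    u, v = np.meshgrid(np.arange(width), np.arange(height))
    x = (u - cx) * depth_img / f
    y = (v - cy) * depth_img / f
    return np.stack([x, y, depth_img], axis=-1).reshape(-1, 3)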
def reset(self):
target_pos = [0.5, 0, 1]
target_orn = p.getQuaternionFromEuler([np.pi, 0, 0])
target_matrix = np.eye(4)
target_matrix[:3, 3] = target_pos
target_matrix[:3, :3] = np.asarray(p.getMatrixFromQuaternion(target_orn)).reshape(3,3)
self.move_robot_to_pose(target_matrix)
def init(self):
self.move_to(Simulator.INIT_GRIPPER_POSE)
def move_to(self, pose: np.ndarray):
#delta_degree, min_new_cam_to_world = ControlUtil.solve_display_table_rot_and_cam_to_world(pose)
#print(delta_degree)
min_new_cam_to_pybullet_world = Simulator.TURNTABLE_WORLD_TO_PYBULLET_WORLD@pose
self.move_to_cam_pose(min_new_cam_to_pybullet_world)
#self.rotate_turntable(delta_degree)
def __del__(self):
p.disconnect()
def create_experiment(self, backup_name=None):
return super().create_experiment(backup_name)
def load_experiment(self, backup_name=None):
super().load_experiment(backup_name)
def move_to_cam_pose(self, camera_pose: np.ndarray):
# extract the position and rotation matrix from the camera pose
camera_pos = camera_pose[:3, 3]
R_camera = camera_pose[:3, :3]
# camera viewing direction (z-axis)
forward = R_camera[:, 2]
# the camera is mounted at a fixed offset from the end effector, so recover the gripper position
# the camera sits 0.12 m in front of the end effector
gripper_pos = camera_pos - forward * 0.12
# the gripper rotation must account for the fixed camera-to-gripper transform;
# here the camera forward is assumed to map to the gripper z-axis and the camera x-axis to the gripper x-axis
R_gripper = R_camera
# assemble the 4x4 homogeneous transform
gripper_pose = np.eye(4)
gripper_pose[:3, :3] = R_gripper
gripper_pose[:3, 3] = gripper_pos
print(gripper_pose)
# move the robot to the computed pose
return self.move_robot_to_pose(gripper_pose)

utils/control.py Normal file

@@ -0,0 +1,59 @@
import numpy as np
from scipy.spatial.transform import Rotation as R
import time
class ControlUtil:
curr_rotation = 0
@staticmethod
def check_limit(new_cam_to_world):
if new_cam_to_world[0,3] < 0 or new_cam_to_world[1,3] > 0:
# if new_cam_to_world[0,3] > 0:
return False
x = abs(new_cam_to_world[0,3])
y = abs(new_cam_to_world[1,3])
tan_y_x = y/x
min_angle = 0 / 180 * np.pi
max_angle = 90 / 180 * np.pi
if tan_y_x < np.tan(min_angle) or tan_y_x > np.tan(max_angle):
return False
return True
@staticmethod
def solve_display_table_rot_and_cam_to_world(cam_to_world: np.ndarray) -> tuple:
if ControlUtil.check_limit(cam_to_world):
return 0, cam_to_world
else:
min_display_table_rot = 180
min_new_cam_to_world = None
for display_table_rot in np.linspace(0.1,360, 1800):
new_world_to_world = ControlUtil.get_z_axis_rot_mat(display_table_rot)
new_cam_to_new_world = cam_to_world
new_cam_to_world = new_world_to_world @ new_cam_to_new_world
if ControlUtil.check_limit(new_cam_to_world):
if display_table_rot < min_display_table_rot:
min_display_table_rot, min_new_cam_to_world = display_table_rot, new_cam_to_world
if abs(display_table_rot - 360) < min_display_table_rot:
min_display_table_rot, min_new_cam_to_world = display_table_rot - 360, new_cam_to_world
if min_new_cam_to_world is None:
raise ValueError("No valid display table rotation found")
delta_degree = min_display_table_rot - ControlUtil.curr_rotation
ControlUtil.curr_rotation = min_display_table_rot
return delta_degree, min_new_cam_to_world
@staticmethod
def get_z_axis_rot_mat(degree):
radian = np.radians(degree)
return np.array([
[np.cos(radian), -np.sin(radian), 0, 0],
[np.sin(radian), np.cos(radian), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
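A minimal usage sketch of ControlUtil (hedged: the pose below is arbitrary; check_limit only constrains the signs of the camera's x/y translation and their ratio):

import numpy as np

cam_to_world = np.eye(4)
cam_to_world[:3, 3] = [-0.3, 0.2, 0.5]  # violates x >= 0 and y <= 0
delta_deg, new_cam_to_world = ControlUtil.solve_display_table_rot_and_cam_to_world(cam_to_world)
print(delta_deg)  # turntable rotation that brings the camera into the allowed sector
print(new_cam_to_world[:3, 3])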


@@ -70,7 +70,7 @@ class RenderUtil:
@staticmethod
def render_pts(cam_pose, scene_path, script_path, scan_points, voxel_threshold=0.005, filter_degree=75, nO_to_nL_pose=None, require_full_scene=False):
import ipdb; ipdb.set_trace()
nO_to_world_pose = DataLoadUtil.get_real_cam_O_from_cam_L(cam_pose, nO_to_nL_pose, scene_path=scene_path)
@@ -88,6 +88,7 @@ class RenderUtil:
'/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', script_path, '--', temp_dir
], capture_output=True, text=True)
#print(result)
#import ipdb; ipdb.set_trace()
path = os.path.join(temp_dir, "tmp")
cam_info = DataLoadUtil.load_cam_info(path, binocular=True)
depth_L, depth_R = DataLoadUtil.load_depth(


@@ -7,6 +7,7 @@ import trimesh
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.data_load import DataLoadUtil
from utils.pts import PtsUtil
from utils.pose import PoseUtil
class visualizeUtil:
@@ -33,7 +34,22 @@ class visualizeUtil:
all_cam_axis = np.array(all_cam_axis).reshape(-1, 3)
np.savetxt(os.path.join(output_dir, "all_cam_pos.txt"), all_cam_pos)
np.savetxt(os.path.join(output_dir, "all_cam_axis.txt"), all_cam_axis)
@staticmethod
def get_cam_pose_and_cam_axis(cam_pose, is_6d_pose):
if is_6d_pose:
matrix_cam_pose = np.eye(4)
matrix_cam_pose[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(cam_pose[:6])
matrix_cam_pose[:3, 3] = cam_pose[6:]
else:
matrix_cam_pose = cam_pose
cam_pos = matrix_cam_pose[:3, 3]
cam_axis = matrix_cam_pose[:3, 2]
num_samples = 10
sample_points = [cam_pos + 0.02*t * cam_axis for t in range(num_samples)]
sample_points = np.array(sample_points)
return cam_pos, sample_points
@staticmethod
def save_all_combined_pts(root, scene, output_dir):
length = DataLoadUtil.get_scene_seq_length(root, scene)