5 Commits

Author  SHA1        Message                                      Date
        be835aded4  finish partial_global inference              2024-11-26 15:40:00 +08:00
        2c8ef20321  upd ab_global_only                           2024-11-20 15:24:45 +08:00
hofee   493639287e  update calculating pts_num in inference.py   2024-11-07 19:42:44 +08:00
hofee   6a608ea74b  upd inference_server                         2024-11-06 20:07:33 +08:00
hofee   6f427785b3  upd inference                                2024-11-05 12:17:20 -06:00
9 changed files with 168 additions and 136 deletions

View File

@@ -6,7 +6,7 @@ runner:
     cuda_visible_devices: "0,1,2,3,4,5,6,7"
 
   experiment:
-    name: train_ab_global_only
+    name: train_ab_partial
     root_dir: "experiments"
     epoch: -1 # -1 stands for last epoch
@@ -14,11 +14,11 @@ runner:
     dataset_list:
       - OmniObject3d_test
-    blender_script_path: "C:\\Document\\Local Project\\nbv_rec\\blender\\data_renderer.py"
-    output_dir: "C:\\Document\\Datasets\\inference_scan_pts_overlap_global_full_on_testset"
+    blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py"
+    output_dir: "/media/hofee/data/data/new_partial_inference_test_output"
     pipeline: nbv_reconstruction_pipeline
     voxel_size: 0.003
+    min_new_area: 1.0
 dataset:
   # OmniObject3d_train:
   #   root_dir: "C:\\Document\\Datasets\\inference_test1"
@@ -34,10 +34,10 @@ dataset:
   #   load_from_preprocess: True
   OmniObject3d_test:
-    root_dir: "C:\\Document\\Datasets\\inference_test"
-    model_dir: "C:\\Document\\Datasets\\scaled_object_meshes"
+    root_dir: "/media/hofee/data/data/new_testset_output"
+    model_dir: "/media/hofee/data/data/scaled_object_meshes"
     source: seq_reconstruction_dataset_preprocessed
-    split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt"
+    # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt"
     type: test
     filter_degree: 75
     eval_list:
@@ -66,7 +66,7 @@ module:
     global_feat: True
     feature_transform: False
   transformer_seq_encoder:
-    embed_dim: 256
+    embed_dim: 320
     num_heads: 4
     ffn_dim: 256
     num_layers: 3

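Note on the embed_dim bump (256 -> 320): it matches the pipeline change below, where each per-view embedding becomes the concatenation of a pooled per-point feature and a pose feature. If the pose feature stays 256-d, the pooled per-point feature would be 64-d (256 + 64 = 320); the exact split is an inference from the diff, not stated in it.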
View File

@@ -8,11 +8,11 @@ runner:
   root_dir: experiments
   generate:
     port: 5002
-    from: 600
-    to: -1 # -1 means all
-    object_dir: /media/hofee/data/data/object_meshes_part1
-    table_model_path: "/media/hofee/data/data/others/table.obj"
-    output_dir: /media/hofee/repository/data_part_1
+    from: 1
+    to: 50 # -1 means all
+    object_dir: C:\\Document\\Datasets\\scaled_object_meshes
+    table_model_path: C:\\Document\\Datasets\\table.obj
+    output_dir: C:\\Document\\Datasets\\debug_generate_view
     binocular_vision: true
     plane_size: 10
     max_views: 512

View File

@@ -88,26 +88,49 @@ class NBVReconstructionPipeline(nn.Module):
         scanned_n_to_world_pose_9d_batch = data[
             "scanned_n_to_world_pose_9d"
         ] # List(B): Tensor(S x 9)
+        scanned_pts_mask_batch = data["scanned_pts_mask"] # List(B): Tensor(S x N)
 
         device = next(self.parameters()).device
 
         embedding_list_batch = []
 
         combined_scanned_pts_batch = data["combined_scanned_pts"] # Tensor(B x N x 3)
-        global_scanned_feat = self.pts_encoder.encode_points(
-            combined_scanned_pts_batch, require_per_point_feat=False
+        global_scanned_feat, per_point_feat_batch = self.pts_encoder.encode_points(
+            combined_scanned_pts_batch, require_per_point_feat=True
         ) # global_scanned_feat: Tensor(B x Dg)
+        batch_size = len(scanned_n_to_world_pose_9d_batch)
+        for i in range(batch_size):
+            seq_len = len(scanned_n_to_world_pose_9d_batch[i])
+            scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d_batch[i].to(device) # Tensor(S x 9)
+            scanned_pts_mask = scanned_pts_mask_batch[i] # Tensor(S x N)
+            per_point_feat = per_point_feat_batch[i] # Tensor(N x Dp)
+            partial_point_feat_seq = []
+            for j in range(seq_len):
+                partial_per_point_feat = per_point_feat[scanned_pts_mask[j]]
+                if partial_per_point_feat.shape[0] == 0:
+                    partial_point_feat = torch.zeros(per_point_feat.shape[1], device=device)
+                else:
+                    partial_point_feat = torch.mean(partial_per_point_feat, dim=0) # Tensor(Dp)
+                partial_point_feat_seq.append(partial_point_feat)
+            partial_point_feat_seq = torch.stack(partial_point_feat_seq, dim=0) # Tensor(S x Dp)
 
-        for scanned_n_to_world_pose_9d in scanned_n_to_world_pose_9d_batch:
-            scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d.to(device) # Tensor(S x 9)
             pose_feat_seq = self.pose_encoder.encode_pose(scanned_n_to_world_pose_9d) # Tensor(S x Dp)
-            seq_embedding = pose_feat_seq
+            seq_embedding = torch.cat([partial_point_feat_seq, pose_feat_seq], dim=-1)
             embedding_list_batch.append(seq_embedding) # List(B): Tensor(S x (Dp))
 
         seq_feat = self.seq_encoder.encode_sequence(embedding_list_batch) # Tensor(B x Ds)
         main_feat = torch.cat([seq_feat, global_scanned_feat], dim=-1) # Tensor(B x (Ds+Dg))
 
         if torch.isnan(main_feat).any():
+            for i in range(len(main_feat)):
+                if torch.isnan(main_feat[i]).any():
+                    scanned_pts_mask = scanned_pts_mask_batch[i]
+                    Log.info(f"scanned_pts_mask shape: {scanned_pts_mask.shape}")
+                    Log.info(f"scanned_pts_mask sum: {scanned_pts_mask.sum()}")
+                    import ipdb
+                    ipdb.set_trace()
             Log.error("nan in main_feat", True)
 
         return main_feat

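Note: the new per-view branch mean-pools the per-point features selected by each view's mask, falling back to zeros for views whose points were all dropped by downsampling. A minimal vectorized sketch of the same masked-mean semantics (pool_partial_features is a hypothetical helper, not part of this repo):

    import torch

    def pool_partial_features(per_point_feat: torch.Tensor,
                              scanned_pts_mask: torch.Tensor) -> torch.Tensor:
        # per_point_feat: (N, Dp) features of the combined cloud.
        # scanned_pts_mask: (S, N) bool, True where view j observed point n.
        mask = scanned_pts_mask.float()           # (S, N)
        counts = mask.sum(dim=1, keepdim=True)    # (S, 1) points per view
        sums = mask @ per_point_feat              # (S, Dp) masked feature sums
        return sums / counts.clamp(min=1.0)       # empty views stay all-zero

    # Example: 3 views over 5 points with 4-d features; view 2 saw nothing.
    feat = torch.randn(5, 4)
    mask = torch.tensor([[1, 1, 0, 0, 0],
                         [0, 0, 1, 1, 1],
                         [0, 0, 0, 0, 0]], dtype=torch.bool)
    print(pool_partial_features(feat, mask))  # row 2 is zeros, as in the diff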
View File

@@ -8,7 +8,7 @@ import torch
 import os
 import sys
-sys.path.append(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction")
+sys.path.append(r"/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction")
 
 from utils.data_load import DataLoadUtil
 from utils.pose import PoseUtil
@@ -47,6 +47,8 @@ class SeqReconstructionDataset(BaseDataset):
         with open(self.split_file_path, "r") as f:
             for line in f:
                 scene_name = line.strip()
+                if not os.path.exists(os.path.join(self.root_dir, scene_name)):
+                    continue
                 scene_name_list.append(scene_name)
         return scene_name_list
@@ -58,29 +60,19 @@ class SeqReconstructionDataset(BaseDataset):
         total = len(self.scene_name_list)
         for idx, scene_name in enumerate(self.scene_name_list):
             print(f"processing {scene_name} ({idx}/{total})")
-            seq_num = DataLoadUtil.get_label_num(self.root_dir, scene_name)
-            scene_max_coverage_rate = 0
-            max_coverage_rate_list = []
             scene_max_cr_idx = 0
-            for seq_idx in range(seq_num):
-                label_path = DataLoadUtil.get_label_path(
-                    self.root_dir, scene_name, seq_idx
-                )
-                label_data = DataLoadUtil.load_label(label_path)
-                max_coverage_rate = label_data["max_coverage_rate"]
-                if max_coverage_rate > scene_max_coverage_rate:
-                    scene_max_coverage_rate = max_coverage_rate
-                    scene_max_cr_idx = seq_idx
-                max_coverage_rate_list.append(max_coverage_rate)
-            best_label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name, scene_max_cr_idx)
-            best_label_data = DataLoadUtil.load_label(best_label_path)
-            first_frame = best_label_data["best_sequence"][0]
-            best_seq_len = len(best_label_data["best_sequence"])
+            frame_len = DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)
+            for i in range(frame_len):
+                path = DataLoadUtil.get_path(self.root_dir, scene_name, i)
+                pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
+                if pts.shape[0] == 0:
+                    continue
                 datalist.append({
                     "scene_name": scene_name,
-                    "first_frame": first_frame,
-                    "best_seq_len": best_seq_len,
-                    "max_coverage_rate": scene_max_coverage_rate,
+                    "first_frame": i,
+                    "best_seq_len": -1,
+                    "max_coverage_rate": 1.0,
                     "label_idx": scene_max_cr_idx,
                 })
         return datalist
@@ -131,8 +123,7 @@ class SeqReconstructionDataset(BaseDataset):
             scanned_n_to_world_pose,
         ) = ([], [], [])
         view = data_item_info["first_frame"]
-        frame_idx = view[0]
-        coverage_rate = view[1]
+        frame_idx = view
         view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
         cam_info = DataLoadUtil.load_cam_info(view_path, binocular=True)
@@ -144,7 +135,7 @@ class SeqReconstructionDataset(BaseDataset):
                 target_point_cloud, self.pts_num
             )
             scanned_views_pts.append(downsampled_target_point_cloud)
-            scanned_coverages_rate.append(coverage_rate)
             n_to_world_6d = PoseUtil.matrix_to_rotation_6d_numpy(
                 np.asarray(n_to_world_pose[:3, :3])
             )
@@ -161,7 +152,6 @@ class SeqReconstructionDataset(BaseDataset):
         gt_pts = self.seq_combined_pts(scene_name, frame_list)
         data_item = {
             "first_scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32), # Ndarray(S x Nv x 3)
-            "first_scanned_coverage_rate": scanned_coverages_rate, # List(S): Float, range(0, 1)
             "first_scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32), # Ndarray(S x 9)
             "seq_max_coverage_rate": max_coverage_rate, # Float, range(0, 1)
             "best_seq_len": best_seq_len, # Int
@@ -180,39 +170,35 @@ class SeqReconstructionDataset(BaseDataset):
 # -------------- Debug ---------------- #
 if __name__ == "__main__":
     import torch
+    from tqdm import tqdm
+    import pickle
+    import os
 
     seed = 0
     torch.manual_seed(seed)
     np.random.seed(seed)
-    '''
-    OmniObject3d_test:
-        root_dir: "H:\\AI\\Datasets\\packed_test_data"
-        model_dir: "H:\\AI\\Datasets\\scaled_object_meshes"
-        source: seq_reconstruction_dataset
-        split_file: "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt"
-        type: test
-        filter_degree: 75
-        eval_list:
-        - pose_diff
-        - coverage_rate_increase
-        ratio: 0.1
-        batch_size: 1
-        num_workers: 12
-        pts_num: 8192
-        load_from_preprocess: True
-    '''
     config = {
-        "root_dir": "H:\\AI\\Datasets\\packed_test_data",
+        "root_dir": "/media/hofee/data/data/new_testset",
         "source": "seq_reconstruction_dataset",
-        "split_file": "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt",
+        "split_file": "/media/hofee/data/data/OmniObject3d_test.txt",
         "load_from_preprocess": True,
        "ratio": 1,
         "filter_degree": 75,
         "num_workers": 0,
         "pts_num": 8192,
-        "type": "test",
+        "type": namespace.Mode.TEST,
     }
-    ds = SeqReconstructionDataset(config)
-    print(len(ds))
-    print(ds.__getitem__(10))
+    output_dir = "/media/hofee/data/data/new_testset_output"
+    os.makedirs(output_dir, exist_ok=True)
+
+    ds = SeqReconstructionDataset(config)
+    for i in tqdm(range(len(ds)), desc="processing dataset"):
+        output_path = os.path.join(output_dir, f"item_{i}.pkl")
+        item = ds.__getitem__(i)
+        for key, value in item.items():
+            if isinstance(value, np.ndarray):
+                item[key] = value.tolist()
+        #import ipdb; ipdb.set_trace()
+        with open(output_path, "wb") as f:
+            pickle.dump(item, f)

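Note: the rewritten __main__ block packs every frame of every scene into item_{i}.pkl files, with ndarrays converted to nested lists; seq_reconstruction_dataset_preprocessed (next file) reads these back. A round-trip sketch under that assumption:

    import os
    import pickle
    import numpy as np

    output_dir = "/media/hofee/data/data/new_testset_output"
    with open(os.path.join(output_dir, "item_0.pkl"), "rb") as f:
        item = pickle.load(f)

    # ndarrays were stored as lists, so restore the arrays:
    first_scanned_pts = np.asarray(item["first_scanned_pts"], dtype=np.float32)       # (S, Nv, 3)
    pose_9d = np.asarray(item["first_scanned_n_to_world_pose_9d"], dtype=np.float32)  # (S, 9)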
View File

@@ -15,21 +15,19 @@ from utils.data_load import DataLoadUtil
 from utils.pose import PoseUtil
 from utils.pts import PtsUtil
 
 @stereotype.dataset("seq_reconstruction_dataset_preprocessed")
 class SeqReconstructionDatasetPreprocessed(BaseDataset):
     def __init__(self, config):
         super(SeqReconstructionDatasetPreprocessed, self).__init__(config)
         self.config = config
         self.root_dir = config["root_dir"]
-        self.real_root_dir = r"H:\AI\Datasets\packed_test_data"
+        self.real_root_dir = r"/media/hofee/data/data/new_testset"
         self.item_list = os.listdir(self.root_dir)
 
     def __getitem__(self, index):
         data = pickle.load(open(os.path.join(self.root_dir, self.item_list[index]), "rb"))
         data_item = {
             "first_scanned_pts": np.asarray(data["first_scanned_pts"], dtype=np.float32), # Ndarray(S x Nv x 3)
-            "first_scanned_coverage_rate": data["first_scanned_coverage_rate"], # List(S): Float, range(0, 1)
             "first_scanned_n_to_world_pose_9d": np.asarray(data["first_scanned_n_to_world_pose_9d"], dtype=np.float32), # Ndarray(S x 9)
             "seq_max_coverage_rate": data["seq_max_coverage_rate"], # Float, range(0, 1)
             "best_seq_len": data["best_seq_len"], # Int
@@ -43,7 +41,6 @@ class SeqReconstructionDatasetPreprocessed(BaseDataset):
     def __len__(self):
         return len(self.item_list)
 
 # -------------- Debug ---------------- #
 if __name__ == "__main__":
     import torch

View File

@@ -25,6 +25,7 @@ class InferencerServer(Runner):
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
         self.pipeline = self.pipeline.to(self.device)
         self.pts_num = 8192
+        self.voxel_size = 0.002
 
         ''' Experiment '''
         self.load_experiment("inferencer_server")
@@ -34,20 +35,14 @@ class InferencerServer(Runner):
         scanned_pts = data["scanned_pts"]
         scanned_n_to_world_pose_9d = data["scanned_n_to_world_pose_9d"]
         combined_scanned_views_pts = np.concatenate(scanned_pts, axis=0)
-        fps_downsampled_combined_scanned_pts, fps_idx = PtsUtil.fps_downsample_point_cloud(
-            combined_scanned_views_pts, self.pts_num, require_idx=True
+        voxel_downsampled_combined_scanned_pts = PtsUtil.voxel_downsample_point_cloud(
+            combined_scanned_views_pts, self.voxel_size
         )
+        fps_downsampled_combined_scanned_pts, fps_idx = PtsUtil.fps_downsample_point_cloud(
+            voxel_downsampled_combined_scanned_pts, self.pts_num, require_idx=True
+        )
-        # combined_scanned_views_pts_mask = np.zeros(len(scanned_pts), dtype=np.uint8)
-        # start_idx = 0
-        # for i in range(len(scanned_pts)):
-        #     end_idx = start_idx + len(scanned_pts[i])
-        #     combined_scanned_views_pts_mask[start_idx:end_idx] = i
-        #     start_idx = end_idx
-        # fps_downsampled_combined_scanned_pts_mask = combined_scanned_views_pts_mask[fps_idx]
         input_data["scanned_pts"] = scanned_pts
-        # input_data["scanned_pts_mask"] = np.asarray(fps_downsampled_combined_scanned_pts_mask, dtype=np.uint8)
         input_data["scanned_n_to_world_pose_9d"] = np.asarray(scanned_n_to_world_pose_9d, dtype=np.float32)
         input_data["combined_scanned_pts"] = np.asarray(fps_downsampled_combined_scanned_pts, dtype=np.float32)
         return input_data

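Note: get_input now voxel-downsamples before FPS: the grid pass equalizes point density across overlapping scans, and FPS then cuts the cloud to the fixed pts_num the encoder expects. A self-contained numpy sketch of the same two-stage scheme (these helpers are illustrative stand-ins for PtsUtil, whose exact signatures are not shown in this diff):

    import numpy as np

    def voxel_downsample(points, voxel_size):
        # Keep one point per occupied voxel (removes density bias).
        voxel_idx = np.floor(points / voxel_size).astype(np.int32)
        _, keep = np.unique(voxel_idx, axis=0, return_index=True)
        return points[keep]

    def fps_downsample(points, n):
        # Naive farthest-point sampling down to n points.
        selected = [0]
        dist = np.linalg.norm(points - points[0], axis=1)
        for _ in range(1, min(n, len(points))):
            selected.append(int(dist.argmax()))
            dist = np.minimum(dist, np.linalg.norm(points - points[selected[-1]], axis=1))
        return points[selected]

    pts = np.random.rand(20000, 3)
    out = fps_downsample(voxel_downsample(pts, 0.002), 8192)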
View File

@@ -23,11 +23,15 @@ from utils.data_load import DataLoadUtil
 
 @stereotype.runner("inferencer")
 class Inferencer(Runner):
     def __init__(self, config_path):
         super().__init__(config_path)
 
         self.script_path = ConfigManager.get(namespace.Stereotype.RUNNER, "blender_script_path")
         self.output_dir = ConfigManager.get(namespace.Stereotype.RUNNER, "output_dir")
+        self.voxel_size = ConfigManager.get(namespace.Stereotype.RUNNER, "voxel_size")
+        self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area")
+        CM = 0.01
+        self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) **2
 
         ''' Pipeline '''
         self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
         self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
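Note: with CM = 0.01, the factor (CM / self.voxel_size)**2 is the number of voxel cells per square centimeter, so min_new_pts_num is "min_new_area square centimeters worth of voxels". Plugging in the voxel_size of 0.003 from the inference config above: (0.01 / 0.003)^2 ≈ 11.1, so min_new_area: 1.0 demands roughly 11 newly occupied voxels per iteration before a view counts as adding geometry.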
@@ -74,20 +78,25 @@ class Inferencer(Runner):
         total = int(len(test_set))
         for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
-            data = test_set.__getitem__(i)
-            scene_name = data["scene_name"]
-            inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
-            if os.path.exists(inference_result_path):
-                Log.info(f"Inference result already exists for scene: {scene_name}")
-                continue
+            try:
+                data = test_set.__getitem__(i)
+                scene_name = data["scene_name"]
+                inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
+                if os.path.exists(inference_result_path):
+                    Log.info(f"Inference result already exists for scene: {scene_name}")
+                    continue
 
-            status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
-            output = self.predict_sequence(data)
-            self.save_inference_result(test_set_name, data["scene_name"], output)
+                status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
+                output = self.predict_sequence(data)
+                self.save_inference_result(test_set_name, data["scene_name"], output)
+            except Exception as e:
+                print(e)
+                Log.error(f"Error, {e}")
+                continue
 
         status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))
 
-    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 7):
+    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 10, max_success=3):
         scene_name = data["scene_name"]
         Log.info(f"Processing scene: {scene_name}")
         status_manager.set_status("inference", "inferencer", "scene", scene_name)
@@ -106,14 +115,18 @@ class Inferencer(Runner):
         ''' data for inference '''
         input_data = {}
         input_data["combined_scanned_pts"] = torch.tensor(data["first_scanned_pts"][0], dtype=torch.float32).to(self.device).unsqueeze(0)
+        input_data["scanned_pts_mask"] = [torch.zeros(input_data["combined_scanned_pts"].shape[1], dtype=torch.bool).to(self.device).unsqueeze(0)]
         input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)]
         input_data["mode"] = namespace.Mode.TEST
         input_pts_N = input_data["combined_scanned_pts"].shape[1]
 
         root = os.path.dirname(scene_path)
         display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
         radius = display_table_info["radius"]
         scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0,display_table_radius=radius))
 
         first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
         scanned_view_pts = [first_frame_target_pts]
         history_indices = [first_frame_scan_points_indices]
@@ -124,12 +137,11 @@ class Inferencer(Runner):
         retry = 0
         pred_cr_seq = [last_pred_cr]
         success = 0
+        last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], voxel_threshold).shape[0]
-        import time
-        while len(pred_cr_seq) < max_iter and retry < max_retry:
-            start_time = time.time()
+        while len(pred_cr_seq) < max_iter and retry < max_retry and success < max_success:
+            Log.green(f"iter: {len(pred_cr_seq)}, retry: {retry}/{max_retry}, success: {success}/{max_success}")
             output = self.pipeline(input_data)
-            end_time = time.time()
-            print(f"Time taken for inference: {end_time - start_time} seconds")
             pred_pose_9d = output["pred_pose_9d"]
             pred_pose = torch.eye(4, device=pred_pose_9d.device)
@@ -137,7 +149,6 @@ class Inferencer(Runner):
             pred_pose[:3,3] = pred_pose_9d[0,6:]
             try:
-                start_time = time.time()
                 new_target_pts, new_target_normals, new_scan_points_indices = RenderUtil.render_pts(pred_pose, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
                 #import ipdb; ipdb.set_trace()
                 if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
@@ -146,17 +157,16 @@ class Inferencer(Runner):
                     curr_overlap_area_threshold = overlap_area_threshold * 0.5
 
                 downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
-                overlap, new_added_pts_num = ReconstructionUtil.check_overlap(downsampled_new_target_pts, down_sampled_model_pts, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
+                overlap, _ = ReconstructionUtil.check_overlap(downsampled_new_target_pts, down_sampled_model_pts, overlap_area_threshold = curr_overlap_area_threshold, voxel_size=voxel_threshold, require_new_added_pts_num = True)
                 if not overlap:
                     Log.yellow("no overlap!")
                     retry += 1
                     retry_overlap_pose.append(pred_pose.cpu().numpy().tolist())
                     continue
 
                 history_indices.append(new_scan_points_indices)
-                end_time = time.time()
-                print(f"Time taken for rendering: {end_time - start_time} seconds")
             except Exception as e:
-                Log.warning(f"Error in scene {scene_path}, {e}")
+                Log.error(f"Error in scene {scene_path}, {e}")
                 print("current pose: ", pred_pose)
                 print("curr_pred_cr: ", last_pred_cr)
                 retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
@@ -164,44 +174,62 @@ class Inferencer(Runner):
                 continue
 
             if new_target_pts.shape[0] == 0:
-                print("no pts in new target")
+                Log.red("no pts in new target")
                 retry_no_pts_pose.append(pred_pose.cpu().numpy().tolist())
                 retry += 1
                 continue
 
-            start_time = time.time()
-            pred_cr, covered_pts_num = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
-            end_time = time.time()
-            print(f"Time taken for coverage rate computation: {end_time - start_time} seconds")
-            print(pred_cr, last_pred_cr, " max: ", data["seq_max_coverage_rate"])
-            print("new added pts num: ", new_added_pts_num)
+            pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
+            Log.yellow(f"{pred_cr}, {last_pred_cr}, max: , {data['seq_max_coverage_rate']}")
             if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
                 print("max coverage rate reached!: ", pred_cr)
                 success += 1
-            elif new_added_pts_num < 5:
-                #success += 1
-                print("min added pts num reached!: ", new_added_pts_num)
-            if pred_cr <= last_pred_cr + cr_increase_threshold:
-                retry += 1
-                retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
-                continue
 
             retry = 0
             pred_cr_seq.append(pred_cr)
             scanned_view_pts.append(new_target_pts)
-            down_sampled_new_pts_world = PtsUtil.random_downsample_point_cloud(new_target_pts, input_pts_N)
-            new_pts = down_sampled_new_pts_world
             input_data["scanned_n_to_world_pose_9d"] = [torch.cat([input_data["scanned_n_to_world_pose_9d"][0], pred_pose_9d], dim=0)]
+
+            start_indices = [0]
+            total_points = 0
+            for pts in scanned_view_pts:
+                total_points += pts.shape[0]
+                start_indices.append(total_points)
+            combined_scanned_pts = np.vstack(scanned_view_pts)
+            voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
+            random_downsampled_combined_scanned_pts_np, random_downsample_idx = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N, require_idx=True)
+            all_idx_unique = np.arange(len(voxel_downsampled_combined_scanned_pts_np))
+            all_random_downsample_idx = all_idx_unique[random_downsample_idx]
+            scanned_pts_mask = []
+            for idx, start_idx in enumerate(start_indices):
+                if idx == len(start_indices) - 1:
+                    break
+                end_idx = start_indices[idx+1]
+                view_inverse = inverse[start_idx:end_idx]
+                view_unique_downsampled_idx = np.unique(view_inverse)
+                view_unique_downsampled_idx_set = set(view_unique_downsampled_idx)
+                mask = np.array([idx in view_unique_downsampled_idx_set for idx in all_random_downsample_idx])
+                scanned_pts_mask.append(mask)
-            combined_scanned_pts = np.concatenate([input_data["combined_scanned_pts"][0].cpu().numpy(), new_pts], axis=0)
-            voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_pts, 0.002)
-            random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
             input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)
-            #import ipdb; ipdb.set_trace()
+            input_data["scanned_pts_mask"] = [torch.tensor(scanned_pts_mask, dtype=torch.bool)]
 
-            if success > 3:
-                break
             last_pred_cr = pred_cr
+            pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
+            Log.info(f"delta pts num:,{pts_num - last_pts_num },{pts_num}, {last_pts_num}")
+
+            if pts_num - last_pts_num < self.min_new_pts_num and pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
+                retry += 1
+                retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
+                Log.red(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
+            elif pts_num - last_pts_num < self.min_new_pts_num and pred_cr > data["seq_max_coverage_rate"] - 1e-2:
+                success += 1
+                Log.success(f"delta pts num < {self.min_new_pts_num}:, {pts_num}, {last_pts_num}")
+            last_pts_num = pts_num
 
         input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()
         result = {
@@ -226,6 +254,14 @@ class Inferencer(Runner):
         return result
 
+    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
+        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
+        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
+        idx_sort = np.argsort(inverse)
+        idx_unique = idx_sort[np.cumsum(counts)-counts]
+        downsampled_points = point_cloud[idx_unique]
+        return downsampled_points, inverse
+
     def compute_coverage_rate(self, scanned_view_pts, new_pts, model_pts, threshold=0.005):
         if new_pts is not None:
             new_scanned_view_pts = scanned_view_pts + [new_pts]

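Note: the inverse array returned by voxel_downsample_with_mapping is what makes the per-view masks above possible: it maps every original point to the row of the downsampled cloud it collapsed into. A toy trace (the function body is copied from the hunk above so the snippet runs standalone):

    import numpy as np

    def voxel_downsample_with_mapping(point_cloud, voxel_size=0.003):
        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
        idx_sort = np.argsort(inverse)
        idx_unique = idx_sort[np.cumsum(counts) - counts]
        return point_cloud[idx_unique], inverse

    pts = np.array([[0.000, 0.0, 0.0],   # view 0
                    [0.001, 0.0, 0.0],   # view 0, same voxel as the point above
                    [0.010, 0.0, 0.0]])  # view 1
    down, inverse = voxel_downsample_with_mapping(pts, voxel_size=0.003)
    # inverse == [0, 0, 1]: original point k landed in voxel row inverse[k].
    view0_voxels = np.unique(inverse[0:2])                    # voxels seen by view 0
    mask_view0 = np.isin(np.arange(len(down)), view0_voxels)  # [True, False]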
View File

@@ -9,7 +9,7 @@ class ViewGenerator(Runner):
         self.config_path = config_path
 
     def run(self):
-        result = subprocess.run(['/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', '../blender/run_blender.py', '--', self.config_path])
+        result = subprocess.run(['blender', '-b', '-P', '../blender/run_blender.py', '--', self.config_path])
         print()
 
     def create_experiment(self, backup_name=None):

View File

@@ -84,12 +84,10 @@ class RenderUtil:
         params_data_path = os.path.join(temp_dir, "params.json")
         with open(params_data_path, 'w') as f:
             json.dump(params, f)
-        start_time = time.time()
         result = subprocess.run([
-            'blender', '-b', '-P', script_path, '--', temp_dir
+            '/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', script_path, '--', temp_dir
         ], capture_output=True, text=True)
-        end_time = time.time()
-        print(f"-- Time taken for blender: {end_time - start_time} seconds")
         # print(result)
         path = os.path.join(temp_dir, "tmp")
         cam_info = DataLoadUtil.load_cam_info(path, binocular=True)
         depth_L, depth_R = DataLoadUtil.load_depth(
@@ -97,7 +95,6 @@ class RenderUtil:
             cam_info["far_plane"],
             binocular=True
         )
-        start_time = time.time()
         mask_L, mask_R = DataLoadUtil.load_seg(path, binocular=True)
         normal_L = DataLoadUtil.load_normal(path, binocular=True, left_only=True)
         ''' target points '''
@@ -134,7 +131,5 @@ class RenderUtil:
         if not has_points:
             target_points = np.zeros((0, 3))
             target_normals = np.zeros((0, 3))
-        end_time = time.time()
-        print(f"-- Time taken for processing: {end_time - start_time} seconds")
         #import ipdb; ipdb.set_trace()
         return target_points, target_normals, scan_points_indices