upd inference

parent 5bcd0fc6e3
commit 6f427785b3
@@ -15,7 +15,7 @@ runner:
     - OmniObject3d_test
   blender_script_path: "C:\\Document\\Local Project\\nbv_rec\\blender\\data_renderer.py"
-  output_dir: "C:\\Document\\Datasets\\inference_scan_pts_overlap_global_full_on_testset"
+  output_dir: "C:\\Document\\Datasets\\debug_output"
   pipeline: nbv_reconstruction_pipeline
   voxel_size: 0.003
 
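Note on the Windows paths: inside YAML double-quoted scalars (as in the hunk above) "\\" is an escape sequence that loads as a single backslash, whereas the unquoted values introduced in the next hunk keep their backslashes literally. A minimal PyYAML sketch, independent of this repo:

    import yaml

    quoted = yaml.safe_load(r'path: "C:\\Document\\Datasets\\debug_output"')
    unquoted = yaml.safe_load(r'path: C:\\Document\\Datasets\\debug_output')
    print(quoted["path"])    # C:\Document\Datasets\debug_output
    print(unquoted["path"])  # C:\\Document\\Datasets\\debug_output (kept literally)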
@@ -8,11 +8,11 @@ runner:
   root_dir: experiments
   generate:
     port: 5002
-    from: 600
+    from: 0
     to: -1 # -1 means all
-    object_dir: /media/hofee/data/data/object_meshes_part1
-    table_model_path: "/media/hofee/data/data/others/table.obj"
-    output_dir: /media/hofee/repository/data_part_1
+    object_dir: C:\\Document\\Datasets\\ball_meshes
+    table_model_path: C:\\Document\\Datasets\\table.obj
+    output_dir: C:\\Document\\Datasets\\debug_ball_generate_view
     binocular_vision: true
     plane_size: 10
     max_views: 512
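The "to: -1 # -1 means all" convention needs a special case when applied as a Python slice, since a literal end index of -1 would drop the last element. A small illustrative helper (assumed, not taken from this repo):

    def select_range(items, start, end):
        # end == -1 is the config's sentinel for "through the last item"
        return items[start:] if end == -1 else items[start:end]

    meshes = [f"mesh_{i}" for i in range(10)]
    print(select_range(meshes, 0, -1))   # from: 0, to: -1 -> all ten meshes
    print(select_range(meshes, 6, 8))    # an explicit window: mesh_6, mesh_7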
@@ -76,6 +76,8 @@ class Inferencer(Runner):
         for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
             data = test_set.__getitem__(i)
             scene_name = data["scene_name"]
+            if scene_name != "omniobject3d-suitcase_001":
+                continue
             inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
             if os.path.exists(inference_result_path):
                 Log.info(f"Inference result already exists for scene: {scene_name}")
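The two added lines pin the whole run to a single scene, a common way to reproduce one failure case quickly. A generalized sketch of the same pattern with an allow-list (names are assumptions, not code from this repo):

    DEBUG_SCENES = {"omniobject3d-suitcase_001"}   # empty set = process everything

    for scene_name in ["omniobject3d-suitcase_001", "omniobject3d-box_002"]:
        if DEBUG_SCENES and scene_name not in DEBUG_SCENES:
            continue
        print("processing", scene_name)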
@@ -87,7 +89,7 @@ class Inferencer(Runner):
 
         status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))
 
-    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 7):
+    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 5):
         scene_name = data["scene_name"]
         Log.info(f"Processing scene: {scene_name}")
         status_manager.set_status("inference", "inferencer", "scene", scene_name)
@@ -110,10 +112,12 @@ class Inferencer(Runner):
         input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)]
         input_data["mode"] = namespace.Mode.TEST
         input_pts_N = input_data["combined_scanned_pts"].shape[1]
+
         root = os.path.dirname(scene_path)
         display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
         radius = display_table_info["radius"]
         scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0,display_table_radius=radius))
+
         first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
         scanned_view_pts = [first_frame_target_pts]
         history_indices = [first_frame_scan_points_indices]
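This hunk's context shows the scan-point setup: the display table's radius is read from scene metadata and turned into candidate scan points at table height. The real ReconstructionUtil.generate_scan_points is not shown in this diff; the following is only a plausible stand-in (a ring of points on the table surface) to make the data flow concrete:

    import numpy as np

    def generate_scan_points(display_table_top: float, display_table_radius: float, n: int = 32):
        # Hypothetical stand-in: evenly spaced points on a circle at table height.
        angles = np.linspace(0.0, 2.0 * np.pi, n, endpoint=False)
        return [[display_table_radius * np.cos(a),
                 display_table_radius * np.sin(a),
                 display_table_top] for a in angles]

    scan_points = np.asarray(generate_scan_points(display_table_top=0, display_table_radius=0.25))
    print(scan_points.shape)   # (32, 3)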
@@ -124,6 +128,7 @@ class Inferencer(Runner):
         retry = 0
         pred_cr_seq = [last_pred_cr]
         success = 0
+        last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], 0.002)
         import time
         while len(pred_cr_seq) < max_iter and retry < max_retry:
             start_time = time.time()
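last_pts_num is later compared arithmetically against an integer point count (pts_num - last_pts_num in a later hunk), so if PtsUtil.voxel_downsample_point_cloud returns the downsampled array rather than a count, the intended value is presumably its .shape[0]. A self-contained sketch of voxel downsampling and counting (a simple grid hash, not the repo's implementation):

    import numpy as np

    def voxel_downsample_point_cloud(points: np.ndarray, voxel_size: float) -> np.ndarray:
        # Keep one representative point per occupied voxel.
        voxel_indices = np.floor(points / voxel_size).astype(np.int64)
        _, unique_idx = np.unique(voxel_indices, axis=0, return_index=True)
        return points[np.sort(unique_idx)]

    pts = np.random.rand(10000, 3)
    last_pts_num = voxel_downsample_point_cloud(pts, 0.002).shape[0]   # a count, not an array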
@@ -174,13 +179,9 @@ class Inferencer(Runner):
             end_time = time.time()
             print(f"Time taken for coverage rate computation: {end_time - start_time} seconds")
             print(pred_cr, last_pred_cr, " max: ", data["seq_max_coverage_rate"])
-            print("new added pts num: ", new_added_pts_num)
             if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
                 print("max coverage rate reached!: ", pred_cr)
                 success += 1
-            elif new_added_pts_num < 5:
-                #success += 1
-                print("min added pts num reached!: ", new_added_pts_num)
             if pred_cr <= last_pred_cr + cr_increase_threshold:
                 retry += 1
                 retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
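With the new_added_pts_num branch removed, the surviving retry rule is the coverage-rate plateau check: a predicted view whose coverage gain does not exceed cr_increase_threshold costs one retry. A minimal self-contained illustration with made-up coverage rates:

    cr_increase_threshold = 0.0
    last_pred_cr, retry = 0.0, 0
    for pred_cr in [0.42, 0.55, 0.55, 0.61, 0.61]:   # hypothetical per-view coverage
        if pred_cr <= last_pred_cr + cr_increase_threshold:
            retry += 1                               # no meaningful gain from this view
        last_pred_cr = pred_cr
    print("retries:", retry)                         # 2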
@@ -202,6 +203,12 @@ class Inferencer(Runner):
             if success > 3:
                 break
             last_pred_cr = pred_cr
+            pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
+            if pts_num - last_pts_num < 10:
+                retry += 1
+                print("delta pts num < 10:", pts_num, last_pts_num)
+            last_pts_num = pts_num
+
 
         input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()
         result = {
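The added lines replace the removed new_added_pts_num check with a stagnation test on the total downsampled point count: fewer than 10 new points after a view costs a retry. A runnable sketch of the same rule on simulated counts (the generator is a hypothetical stand-in for rendering and merging a view):

    import random

    def simulated_totals(n_views: int):
        # Hypothetical stand-in: cumulative point count after each new view.
        total = 0
        for _ in range(n_views):
            total += random.randint(0, 30)
            yield total

    retry, max_retry, last_pts_num = 0, 5, 0
    for pts_num in simulated_totals(50):
        if retry >= max_retry:
            break
        if pts_num - last_pts_num < 10:
            retry += 1                     # this view contributed almost nothing new
            print("delta pts num < 10:", pts_num, last_pts_num)
        last_pts_num = pts_num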
|
@ -9,7 +9,7 @@ class ViewGenerator(Runner):
|
|||||||
self.config_path = config_path
|
self.config_path = config_path
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
result = subprocess.run(['/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', '../blender/run_blender.py', '--', self.config_path])
|
result = subprocess.run(['blender', '-b', '-P', '../blender/run_blender.py', '--', self.config_path])
|
||||||
print()
|
print()
|
||||||
|
|
||||||
def create_experiment(self, backup_name=None):
|
def create_experiment(self, backup_name=None):
|
||||||
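Switching from a hard-coded /home/hofee/... executable to a bare 'blender' makes the runner rely on PATH lookup, which is what lets the same code run on Windows. A hedged sketch of a slightly more defensive variant (not what the commit does; "config.yaml" stands in for self.config_path):

    import shutil
    import subprocess

    blender = shutil.which("blender")   # resolve the executable from PATH
    if blender is None:
        raise FileNotFoundError("blender is not on PATH; install it or pass an absolute path")
    subprocess.run([blender, "-b", "-P", "../blender/run_blender.py", "--", "config.yaml"])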
|
Loading…
x
Reference in New Issue
Block a user