From 6f427785b3395f681951bead746e439da35d9fa7 Mon Sep 17 00:00:00 2001
From: hofee <64160135+GitHofee@users.noreply.github.com>
Date: Tue, 5 Nov 2024 12:17:20 -0600
Subject: [PATCH] upd inference

---
 configs/local/inference_config.yaml     |  2 +-
 configs/local/view_generate_config.yaml |  8 ++++----
 runners/inferencer.py                   | 17 ++++++++++++-----
 runners/view_generator.py               |  2 +-
 4 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/configs/local/inference_config.yaml b/configs/local/inference_config.yaml
index 13a7acc..1e6d89b 100644
--- a/configs/local/inference_config.yaml
+++ b/configs/local/inference_config.yaml
@@ -15,7 +15,7 @@ runner:
       - OmniObject3d_test
     blender_script_path: "C:\\Document\\Local Project\\nbv_rec\\blender\\data_renderer.py"
-    output_dir: "C:\\Document\\Datasets\\inference_scan_pts_overlap_global_full_on_testset"
+    output_dir: "C:\\Document\\Datasets\\debug_output"
     pipeline: nbv_reconstruction_pipeline
     voxel_size: 0.003
 
diff --git a/configs/local/view_generate_config.yaml b/configs/local/view_generate_config.yaml
index b58d672..803bae8 100644
--- a/configs/local/view_generate_config.yaml
+++ b/configs/local/view_generate_config.yaml
@@ -8,11 +8,11 @@ runner:
     root_dir: experiments
   generate:
     port: 5002
-    from: 600
+    from: 0
     to: -1 # -1 means all
-    object_dir: /media/hofee/data/data/object_meshes_part1
-    table_model_path: "/media/hofee/data/data/others/table.obj"
-    output_dir: /media/hofee/repository/data_part_1
+    object_dir: C:\\Document\\Datasets\\ball_meshes
+    table_model_path: C:\\Document\\Datasets\\table.obj
+    output_dir: C:\\Document\\Datasets\\debug_ball_generate_view
     binocular_vision: true
     plane_size: 10
     max_views: 512
diff --git a/runners/inferencer.py b/runners/inferencer.py
index a38b9e8..0ba8a4f 100644
--- a/runners/inferencer.py
+++ b/runners/inferencer.py
@@ -76,6 +76,8 @@ class Inferencer(Runner):
         for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
             data = test_set.__getitem__(i)
             scene_name = data["scene_name"]
+            if scene_name != "omniobject3d-suitcase_001":
+                continue
             inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
             if os.path.exists(inference_result_path):
                 Log.info(f"Inference result already exists for scene: {scene_name}")
@@ -87,7 +89,7 @@ class Inferencer(Runner):
 
         status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))
 
-    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 7):
+    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry = 5):
         scene_name = data["scene_name"]
         Log.info(f"Processing scene: {scene_name}")
         status_manager.set_status("inference", "inferencer", "scene", scene_name)
@@ -110,10 +112,12 @@ class Inferencer(Runner):
         input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)]
         input_data["mode"] = namespace.Mode.TEST
         input_pts_N = input_data["combined_scanned_pts"].shape[1]
+
         root = os.path.dirname(scene_path)
         display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
         radius = display_table_info["radius"]
         scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0,display_table_radius=radius))
+
         first_frame_target_pts, first_frame_target_normals, first_frame_scan_points_indices = RenderUtil.render_pts(first_frame_to_world, scene_path, self.script_path, scan_points, voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose)
         scanned_view_pts = [first_frame_target_pts]
         history_indices = [first_frame_scan_points_indices]
@@ -124,6 +128,7 @@ class Inferencer(Runner):
         retry = 0
         pred_cr_seq = [last_pred_cr]
         success = 0
+        last_pts_num = PtsUtil.voxel_downsample_point_cloud(data["first_scanned_pts"][0], 0.002)
         import time
         while len(pred_cr_seq) < max_iter and retry < max_retry:
             start_time = time.time()
@@ -174,13 +179,9 @@ class Inferencer(Runner):
             end_time = time.time()
             print(f"Time taken for coverage rate computation: {end_time - start_time} seconds")
             print(pred_cr, last_pred_cr, " max: ", data["seq_max_coverage_rate"])
-            print("new added pts num: ", new_added_pts_num)
             if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
                 print("max coverage rate reached!: ", pred_cr)
                 success += 1
-            elif new_added_pts_num < 5:
-                #success += 1
-                print("min added pts num reached!: ", new_added_pts_num)
             if pred_cr <= last_pred_cr + cr_increase_threshold:
                 retry += 1
                 retry_duplication_pose.append(pred_pose.cpu().numpy().tolist())
@@ -202,6 +203,12 @@ class Inferencer(Runner):
             if success > 3:
                 break
             last_pred_cr = pred_cr
+            pts_num = voxel_downsampled_combined_scanned_pts_np.shape[0]
+            if pts_num - last_pts_num < 10:
+                retry += 1
+                print("delta pts num < 10:", pts_num, last_pts_num)
+            last_pts_num = pts_num
+
 
         input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()
         result = {
diff --git a/runners/view_generator.py b/runners/view_generator.py
index 634ccbf..2e28c44 100644
--- a/runners/view_generator.py
+++ b/runners/view_generator.py
@@ -9,7 +9,7 @@ class ViewGenerator(Runner):
         self.config_path = config_path
 
     def run(self):
-        result = subprocess.run(['/home/hofee/blender-4.0.2-linux-x64/blender', '-b', '-P', '../blender/run_blender.py', '--', self.config_path])
+        result = subprocess.run(['blender', '-b', '-P', '../blender/run_blender.py', '--', self.config_path])
         print()
 
     def create_experiment(self, backup_name=None):