2 Commits

Author  SHA1        Message                          Date
        ad7a1c9cdf  solve merge                      2025-04-11 20:10:56 +00:00
        be835aded4  finish partial_global inference  2024-11-26 15:40:00 +08:00
3 changed files with 15 additions and 3 deletions

View File

@@ -70,7 +70,7 @@ module:
     global_feat: True
     feature_transform: False
   transformer_seq_encoder:
-    embed_dim: 256
+    embed_dim: 320
     num_heads: 4
     ffn_dim: 256
     num_layers: 3
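The bump from embed_dim 256 to 320 keeps the embedding width divisible by the 4 attention heads (80 dims per head). Below is a minimal sketch of an encoder wired to this config, assuming it maps onto a standard PyTorch nn.TransformerEncoder; the class name is illustrative, not the repository's actual transformer_seq_encoder implementation.

import torch
import torch.nn as nn

class SeqEncoderSketch(nn.Module):
    # Illustrative stand-in matching the updated config values:
    # embed_dim=320, num_heads=4, ffn_dim=256, num_layers=3.
    def __init__(self, embed_dim=320, num_heads=4, ffn_dim=256, num_layers=3):
        super().__init__()
        layer = nn.TransformerEncoderLayer(
            d_model=embed_dim, nhead=num_heads,
            dim_feedforward=ffn_dim, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=num_layers)

    def forward(self, x):  # x: (B, S, embed_dim)
        return self.encoder(x)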

View File

@@ -90,6 +90,7 @@ class NBVReconstructionPipeline(nn.Module):
         scanned_n_to_world_pose_9d_batch = data[
             "scanned_n_to_world_pose_9d"
         ]  # List(B): Tensor(S x 9)
-        scanned_pts_mask_batch = data["scanned_pts_mask"]  # List(B): Tensor(S x N)
+        scanned_pts_mask_batch = data["scanned_pts_mask"]  # List(B): Tensor(N)
@@ -136,4 +137,4 @@ class NBVReconstructionPipeline(nn.Module):
             ipdb.set_trace()
             Log.error("nan in main_feat", True)
-        return main_feat
+        return main_feat
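For context, this hunk sits inside a NaN guard on the fused feature. A standalone sketch of how such a check typically reads, assuming torch.isnan is the detection mechanism; only the variable name and log message come from the diff, the rest is illustrative:

import torch

def check_main_feat(main_feat):
    # Illustrative guard: detect NaNs before returning the fused feature.
    # The diff instead drops into ipdb and logs "nan in main_feat" here.
    if torch.isnan(main_feat).any():
        raise ValueError("nan in main_feat")
    return main_feat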

View File

@@ -92,7 +92,8 @@ class Inferencer(Runner):
                     output = self.predict_sequence(data)
                     self.save_inference_result(test_set_name, data["scene_name"], output)
                 except Exception as e:
-                    Log.error(f"Error in scene {scene_name}, {e}")
+                    print(e)
+                    Log.error(f"Error, {e}")
                     continue
         status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))
@@ -116,7 +117,9 @@ class Inferencer(Runner):
         ''' data for inference '''
         input_data = {}
         input_data["combined_scanned_pts"] = torch.tensor(data["first_scanned_pts"][0], dtype=torch.float32).to(self.device).unsqueeze(0)
+        input_data["scanned_pts_mask"] = [torch.zeros(input_data["combined_scanned_pts"].shape[1], dtype=torch.bool).to(self.device).unsqueeze(0)]
         input_data["scanned_n_to_world_pose_9d"] = [torch.tensor(data["first_scanned_n_to_world_pose_9d"], dtype=torch.float32).to(self.device)]
         input_data["mode"] = namespace.Mode.TEST
         input_pts_N = input_data["combined_scanned_pts"].shape[1]
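The added scanned_pts_mask entry is an all-False boolean mask over the N input points, wrapped in a list to match the List(B) convention the pipeline diff documents. A self-contained sketch of the shapes involved; the device and point count here are placeholders:

import torch

device = "cpu"  # the diff uses self.device
N = 8192        # illustrative point count
combined_scanned_pts = torch.rand(1, N, 3, device=device)  # (1, N, 3)
scanned_pts_mask = [torch.zeros(N, dtype=torch.bool, device=device).unsqueeze(0)]
# scanned_pts_mask[0] has shape (1, N): every point starts unmasked (False),
# mirroring the List(B): Tensor(N) comment in the pipeline diff.
print(scanned_pts_mask[0].shape)  # torch.Size([1, 8192])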
@@ -254,6 +257,14 @@ class Inferencer(Runner):
         return result
 
+    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
+        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
+        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
+        idx_sort = np.argsort(inverse)
+        idx_unique = idx_sort[np.cumsum(counts) - counts]
+        downsampled_points = point_cloud[idx_unique]
+        return downsampled_points, inverse
+
     def compute_coverage_rate(self, scanned_view_pts, new_pts, model_pts, threshold=0.005):
         if new_pts is not None:
             new_scanned_view_pts = scanned_view_pts + [new_pts]
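The new voxel_downsample_with_mapping keeps one representative point per occupied voxel and also returns inverse, which maps every original point to the row of the downsampled cloud that represents it. A standalone usage sketch; the function body is copied from the diff (minus self), and the point cloud is fabricated test data:

import numpy as np

def voxel_downsample_with_mapping(point_cloud, voxel_size=0.003):
    # Body copied from the diff: quantize to voxel grid, keep the first
    # point seen in each voxel, and return a per-point voxel index.
    voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
    unique_voxels, inverse, counts = np.unique(
        voxel_indices, axis=0, return_inverse=True, return_counts=True)
    idx_sort = np.argsort(inverse)
    idx_unique = idx_sort[np.cumsum(counts) - counts]  # first hit per voxel
    return point_cloud[idx_unique], inverse

pts = np.random.rand(10000, 3).astype(np.float32)  # fabricated test cloud
down, inverse = voxel_downsample_with_mapping(pts, voxel_size=0.003)
assert down.shape[0] == inverse.max() + 1
# inverse[i] is the row of `down` representing pts[i], so per-point state
# (e.g. the scanned_pts_mask added above) survives the downsampling step.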