solve merge
This commit is contained in: commit a883a31968

preprocess/pack_preprocessed_data.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+import os
+import shutil
+
+def pack_scene_data(root, scene, output_dir):
+    scene_dir = os.path.join(output_dir, scene)
+    if not os.path.exists(scene_dir):
+        os.makedirs(scene_dir)
+
+    pts_dir = os.path.join(root, scene, "pts")
+    if os.path.exists(pts_dir):
+        shutil.move(pts_dir, os.path.join(scene_dir, "pts"))
+
+    scan_points_indices_dir = os.path.join(root, scene, "scan_points_indices")
+    if os.path.exists(scan_points_indices_dir):
+        shutil.move(scan_points_indices_dir, os.path.join(scene_dir, "scan_points_indices"))
+
+    scan_points_file = os.path.join(root, scene, "scan_points.txt")
+    if os.path.exists(scan_points_file):
+        shutil.move(scan_points_file, os.path.join(scene_dir, "scan_points.txt"))
+
+    model_pts_nrm_file = os.path.join(root, scene, "points_and_normals.txt")
+    if os.path.exists(model_pts_nrm_file):
+        shutil.move(model_pts_nrm_file, os.path.join(scene_dir, "points_and_normals.txt"))
+
+    camera_dir = os.path.join(root, scene, "camera_params")
+    if os.path.exists(camera_dir):
+        shutil.move(camera_dir, os.path.join(scene_dir, "camera_params"))
+
+    scene_info_file = os.path.join(root, scene, "scene_info.json")
+    if os.path.exists(scene_info_file):
+        shutil.move(scene_info_file, os.path.join(scene_dir, "scene_info.json"))
+
+def pack_all_scenes(root, scene_list, output_dir):
+    for idx, scene in enumerate(scene_list):
+        print(f"packing scene {scene} ({idx+1}/{len(scene_list)})")
+        pack_scene_data(root, scene, output_dir)
+
+if __name__ == "__main__":
+    root = r"H:\AI\Datasets\nbv_rec_part2"
+    output_dir = r"H:\AI\Datasets\scene_info_part2"
+    scene_list = os.listdir(root)
+    from_idx = 0
+    to_idx = len(scene_list)
+    print(f"packing scenes {scene_list[from_idx:to_idx]}")
+
+    pack_all_scenes(root, scene_list[from_idx:to_idx], output_dir)
+    print("packing done")

preprocess/pack_upload_data.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+import os
+import shutil
+
+def pack_scene_data(root, scene, output_dir):
+    scene_dir = os.path.join(output_dir, scene)
+    if not os.path.exists(scene_dir):
+        os.makedirs(scene_dir)
+
+    pts_dir = os.path.join(root, scene, "pts")
+    if os.path.exists(pts_dir):
+        shutil.move(pts_dir, os.path.join(scene_dir, "pts"))
+
+    camera_dir = os.path.join(root, scene, "camera_params")
+    if os.path.exists(camera_dir):
+        shutil.move(camera_dir, os.path.join(scene_dir, "camera_params"))
+
+    scene_info_file = os.path.join(root, scene, "scene_info.json")
+    if os.path.exists(scene_info_file):
+        shutil.move(scene_info_file, os.path.join(scene_dir, "scene_info.json"))
+
+    label_dir = os.path.join(root, scene, "label")
+    if os.path.exists(label_dir):
+        shutil.move(label_dir, os.path.join(scene_dir, "label"))
+
+def pack_all_scenes(root, scene_list, output_dir):
+    for idx, scene in enumerate(scene_list):
+        print(f"packing {scene} ({idx+1}/{len(scene_list)})")
+        pack_scene_data(root, scene, output_dir)
+
+if __name__ == "__main__":
+    root = r"H:\AI\Datasets\nbv_rec_part2"
+    output_dir = r"H:\AI\Datasets\upload_part2"
+    scene_list = os.listdir(root)
+    from_idx = 0
+    to_idx = len(scene_list)
+    print(f"packing {scene_list[from_idx:to_idx]}")
+
+    pack_all_scenes(root, scene_list[from_idx:to_idx], output_dir)
+    print("packing done")
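Note: both pack scripts relocate per-scene data with shutil.move, so the source tree under root is emptied as a side effect. If that is not desired, a non-destructive variant (a hypothetical sketch, not part of this commit) can copy instead of move:

    import os
    import shutil

    def copy_if_exists(src, dst):
        # copy a file or a directory tree instead of moving it,
        # leaving the original dataset intact
        if os.path.isdir(src):
            shutil.copytree(src, dst, dirs_exist_ok=True)
        elif os.path.isfile(src):
            shutil.copy2(src, dst)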

@@ -164,10 +164,10 @@ def save_scene_data(root, scene, scene_idx=0, scene_total=1, file_type="txt"):
 if __name__ == "__main__":
     #root = "/media/hofee/repository/new_data_with_normal"
-    root = r"C:\Document\Datasets\nbv_rec_part2"
+    root = r"H:\AI\Datasets\nbv_rec_part2"
     scene_list = os.listdir(root)
-    from_idx = 600 # 1000
-    to_idx = len(scene_list) # 1500
+    from_idx = 0 # 1000
+    to_idx = 600 # 1500


     cnt = 0

runners/inferece_server.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+import os
+import json
+import torch
+import numpy as np
+from flask import Flask, request, jsonify
+
+import PytorchBoot.namespace as namespace
+import PytorchBoot.stereotype as stereotype
+from PytorchBoot.factory import ComponentFactory
+
+from PytorchBoot.runners.runner import Runner
+from PytorchBoot.utils import Log
+
+from utils.pts import PtsUtil
+
+
+@stereotype.runner("inferencer")
+class InferencerServer(Runner):
+    def __init__(self, config_path):
+        super().__init__(config_path)
+
+        ''' Web Server '''
+        self.app = Flask(__name__)
+        ''' Pipeline '''
+        self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
+        self.pipeline: torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
+        self.pipeline = self.pipeline.to(self.device)
+
+        ''' Experiment '''
+        self.load_experiment("nbv_evaluator")
+
+    def get_input_data(self, data):
+        input_data = {}
+        scanned_pts = data["scanned_pts"]
+        scanned_n_to_world_pose_9d = data["scanned_n_to_world_pose_9d"]
+        combined_scanned_views_pts = np.concatenate(scanned_pts, axis=0)
+        fps_downsampled_combined_scanned_pts, fps_idx = PtsUtil.fps_downsample_point_cloud(
+            combined_scanned_views_pts, self.pts_num, require_idx=True
+        )
+        # one view-index label per combined point; the mask must cover every
+        # point, not just one entry per view
+        combined_scanned_views_pts_mask = np.zeros(len(combined_scanned_views_pts), dtype=np.uint8)
+        start_idx = 0
+        for i in range(len(scanned_pts)):
+            end_idx = start_idx + len(scanned_pts[i])
+            combined_scanned_views_pts_mask[start_idx:end_idx] = i
+            start_idx = end_idx
+
+        # carry the per-view labels through the FPS downsampling
+        fps_downsampled_combined_scanned_pts_mask = combined_scanned_views_pts_mask[fps_idx]
+
+        input_data["scanned_pts_mask"] = np.asarray(fps_downsampled_combined_scanned_pts_mask, dtype=np.uint8)
+        input_data["scanned_n_to_world_pose_9d"] = np.asarray(scanned_n_to_world_pose_9d, dtype=np.float32)
+        input_data["combined_scanned_pts"] = np.asarray(fps_downsampled_combined_scanned_pts, dtype=np.float32)
+        return input_data
+
+    def get_result(self, output_data):
+        estimated_delta_rot_9d = output_data["pred_pose_9d"]
+        result = {
+            "estimated_delta_rot_9d": estimated_delta_rot_9d.tolist()
+        }
+        return result
+
+    def run(self):
+        Log.info("Loading from epoch {}.".format(self.current_epoch))
+
+        @self.app.route("/inference", methods=["POST"])
+        def inference():
+            data = request.json
+            input_data = self.get_input_data(data)
+            output_data = self.pipeline.forward_test(input_data)
+            result = self.get_result(output_data)
+            return jsonify(result)
+
+        self.app.run(host="0.0.0.0", port=5000)
+
+    def get_checkpoint_path(self, is_last=False):
+        return os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME,
+                            "Epoch_{}.pth".format(
+                                self.current_epoch if self.current_epoch != -1 and not is_last else "last"))
+
+    def load_checkpoint(self, is_last=False):
+        self.load(self.get_checkpoint_path(is_last))
+        Log.success(f"Loaded checkpoint from {self.get_checkpoint_path(is_last)}")
+        if is_last:
+            checkpoint_root = os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME)
+            meta_path = os.path.join(checkpoint_root, "meta.json")
+            if not os.path.exists(meta_path):
+                raise FileNotFoundError(
+                    "No checkpoint meta.json file in the experiment {}".format(self.experiments_config["name"]))
+            with open(meta_path, "r") as f:
+                meta = json.load(f)
+            self.current_epoch = meta["last_epoch"]
+            self.current_iter = meta["last_iter"]
+
+    def load_experiment(self, backup_name=None):
+        super().load_experiment(backup_name)
+        self.current_epoch = self.experiments_config["epoch"]
+        self.load_checkpoint(is_last=(self.current_epoch == -1))
+
+    def create_experiment(self, backup_name=None):
+        super().create_experiment(backup_name)
+
+    def load(self, path):
+        state_dict = torch.load(path)
+        self.pipeline.load_state_dict(state_dict)
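For reference, a minimal client sketch for the /inference route above (hypothetical, not part of this commit; host, port, and array shapes are illustrative, and the server must already be running with a loaded checkpoint):

    import numpy as np
    import requests

    # one captured view: an (N, 3) point cloud plus its 9D camera pose
    payload = {
        "scanned_pts": [np.random.rand(1024, 3).tolist()],
        "scanned_n_to_world_pose_9d": [np.zeros(9).tolist()],
    }
    resp = requests.post("http://localhost:5000/inference", json=payload)
    print(resp.json()["estimated_delta_rot_9d"])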

@@ -211,6 +211,17 @@ class DataLoadUtil:
         pts = np.load(npy_path)
         return pts
 
+    @staticmethod
+    def load_from_preprocessed_nrm(path, file_type="npy"):
+        npy_path = os.path.join(
+            os.path.dirname(path), "nrm", os.path.basename(path) + "." + file_type
+        )
+        if file_type == "txt":
+            nrm = np.loadtxt(npy_path)
+        else:
+            nrm = np.load(npy_path)
+        return nrm
+
     @staticmethod
     def cam_pose_transformation(cam_pose_before):
         offset = np.asarray([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
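The new helper mirrors load_from_preprocessed_pts: given a frame path, it reads normals from a sibling nrm/ folder next to that frame. A usage sketch (paths are illustrative):

    # expects /data/scene_0/nrm/000.npy on disk
    nrm = DataLoadUtil.load_from_preprocessed_nrm("/data/scene_0/000", "npy")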

utils/vis.py (25 lines changed)
@@ -158,17 +158,22 @@ class visualizeUtil:
         np.savetxt(os.path.join(output_dir, "target_normal.txt"), sampled_visualized_normal)
 
     @staticmethod
-    def save_pts_nrm(pts_nrm, output_dir):
-        pts = pts_nrm[:, :3]
-        nrm = pts_nrm[:, 3:]
+    def save_pts_nrm(root, scene, frame_idx, output_dir, binocular=False):
+        path = DataLoadUtil.get_path(root, scene, frame_idx)
+        pts_world = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
+        nrm_camera = DataLoadUtil.load_from_preprocessed_nrm(path, "npy")
+        cam_info = DataLoadUtil.load_cam_info(path, binocular=binocular)
+        cam_to_world = cam_info["cam_to_world"]
+        nrm_world = nrm_camera @ cam_to_world[:3, :3].T
         visualized_nrm = []
         num_samples = 10
-        for i in range(len(pts)):
-            visualized_nrm.append(pts[i] + 0.02*t * nrm[i] for t in range(num_samples))
-        visualized_nrm = np.array(visualized_nrm).reshape(-1, 3)
-        np.savetxt(os.path.join(output_dir, "nrm.txt"), visualized_nrm)
-        np.savetxt(os.path.join(output_dir, "pts.txt"), pts)
+        for i in range(len(pts_world)):
+            for t in range(num_samples):
+                visualized_nrm.append(pts_world[i] - 0.02 * t * nrm_world[i])
+
+        visualized_nrm = np.array(visualized_nrm)
+        np.savetxt(os.path.join(output_dir, "nrm.txt"), visualized_nrm)
+        np.savetxt(os.path.join(output_dir, "pts.txt"), pts_world)
 
 # ------ Debug ------
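Since normals are directions rather than points, only the rotation block of cam_to_world applies when mapping them to world space; including the translation column would be incorrect. In the row-vector convention used here:

    # nrm_camera: (n, 3) normals in the camera frame
    R = cam_to_world[:3, :3]
    nrm_world = nrm_camera @ R.T   # rotate only; no translation for direction vectors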

@@ -184,6 +189,4 @@ if __name__ == "__main__":
     # visualizeUtil.save_seq_cam_pos_and_cam_axis(root, scene, [0, 121, 286, 175, 111,366,45,230,232,225,255,17,199,78,60], output_dir)
     # visualizeUtil.save_target_mesh_at_world_space(root, model_dir, scene)
     #visualizeUtil.save_points_and_normals(root, scene,"10", output_dir, binocular=True)
-    pts_nrm = np.loadtxt(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction\pts_nrm_target.txt")
-    visualizeUtil.save_pts_nrm(pts_nrm, output_dir)
+    visualizeUtil.save_pts_nrm(root, scene, "116", output_dir, binocular=True)