import numpy as np
from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.config import ConfigManager
from PytorchBoot.utils.log_util import Log
import pickle
import torch
import os
import sys

sys.path.append(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction")

from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil

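# Dataset over preprocessed ("packed") sequence-reconstruction samples: every
# file under `root_dir` is assumed to be one pickled dict written by an
# offline preprocessing step, so __getitem__ only has to unpickle it and cast
# the array-valued fields to float32.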
@stereotype.dataset("seq_reconstruction_dataset_preprocessed")
class SeqReconstructionDatasetPreprocessed(BaseDataset):
    def __init__(self, config):
        super(SeqReconstructionDatasetPreprocessed, self).__init__(config)
        self.config = config
        self.root_dir = config["root_dir"]
        self.real_root_dir = r"H:\AI\Datasets\packed_test_data"
        self.item_list = os.listdir(self.root_dir)

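    # Note: the 9D camera pose below is presumably the 6D rotation
    # representation plus a 3D translation (see utils.pose.PoseUtil).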
    def __getitem__(self, index):
        with open(os.path.join(self.root_dir, self.item_list[index]), "rb") as f:
            data = pickle.load(f)
        data_item = {
            "first_scanned_pts": np.asarray(data["first_scanned_pts"], dtype=np.float32),  # Ndarray(S x Nv x 3)
            "first_scanned_coverage_rate": data["first_scanned_coverage_rate"],  # List(S): Float, range(0, 1)
            "first_scanned_n_to_world_pose_9d": np.asarray(data["first_scanned_n_to_world_pose_9d"], dtype=np.float32),  # Ndarray(S x 9)
            "seq_max_coverage_rate": data["seq_max_coverage_rate"],  # Float, range(0, 1)
            "best_seq_len": data["best_seq_len"],  # Int
            "scene_name": data["scene_name"],  # String
            "gt_pts": np.asarray(data["gt_pts"], dtype=np.float32),  # Ndarray(N x 3)
            "scene_path": os.path.join(self.real_root_dir, data["scene_name"]),  # String
            "O_to_L_pose": np.asarray(data["O_to_L_pose"], dtype=np.float32),
        }
        return data_item

    def __len__(self):
        return len(self.item_list)

# -------------- Debug ---------------- #
if __name__ == "__main__":
    import torch

    seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)
    '''
    OmniObject3d_test:
        root_dir: "H:\\AI\\Datasets\\packed_test_data"
        model_dir: "H:\\AI\\Datasets\\scaled_object_meshes"
        source: seq_reconstruction_dataset
        split_file: "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt"
        type: test
        filter_degree: 75
        eval_list:
            - pose_diff
            - coverage_rate_increase
        ratio: 0.1
        batch_size: 1
        num_workers: 12
        pts_num: 8192
        load_from_preprocess: True
    '''
    config = {
        "root_dir": "H:\\AI\\Datasets\\packed_test_data",
        "source": "seq_reconstruction_dataset",
        "split_file": "H:\\AI\\Datasets\\data_list\\OmniObject3d_test.txt",
        "load_from_preprocess": True,
        "ratio": 1,
        "filter_degree": 75,
        "num_workers": 0,
        "pts_num": 8192,
        "type": "test",
    }
    ds = SeqReconstructionDatasetPreprocessed(config)
    print(len(ds))
    print(ds.__getitem__(10))
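
    # A minimal sketch (not from the original file): wrapping the dataset in a
    # torch DataLoader, assuming batch_size=1 as in the YAML example above so
    # the default collate_fn can handle the variable-sized per-scene arrays.
    from torch.utils.data import DataLoader

    loader = DataLoader(ds, batch_size=1, shuffle=False, num_workers=0)
    for batch in loader:
        # String fields ("scene_name", "scene_path") come back as lists of
        # length 1; numpy fields become tensors with a leading batch dim.
        print(batch["scene_name"], batch["first_scanned_pts"].shape)
        break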