add transformer seq encoder and add seq_feat in gf_view_finder
@@ -1,10 +1,47 @@
import torch
from torch import nn

import PytorchBoot.stereotype as stereotype
import sys; sys.path.append(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction")
from modules.seq_encoder.abstract_seq_encoder import SequenceEncoder

@stereotype.module("transformer_seq_encoder")
class TransformerSequenceEncoder(SequenceEncoder):
    def __init__(self, config):
        super(TransformerSequenceEncoder, self).__init__()
        self.config = config
        embed_dim = config['pts_embed_dim'] + config['pose_embed_dim']
        # learned positional encoding, one slot per sequence position
        self.positional_encoding = nn.Parameter(torch.zeros(1, config['max_seq_len'], embed_dim))
        # batch_first=True: inputs are (batch, seq, feature), so self-attention runs
        # across the sequence axis instead of over a batch of length-1 sequences
        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=config['num_heads'],
                                                   dim_feedforward=config['ffn_dim'], batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=config['num_layers'])
        self.fc = nn.Linear(embed_dim, config['output_dim'])

    def encode_sequence(self, pts_embedding_list, pose_embedding_list):
        # fuse each point-cloud embedding with its pose embedding; the last
        # frame is left out, so only the history frames are encoded
        # (assumes at most max_seq_len history frames)
        combined_features = [
            torch.cat((pts_embed, pose_embed), dim=-1)
            for pts_embed, pose_embed in zip(pts_embedding_list[:-1], pose_embedding_list[:-1])
        ]
        combined_tensor = torch.stack(combined_features)               # (seq_len, embed_dim)
        pos_encoding = self.positional_encoding[:, :combined_tensor.size(0), :]
        combined_tensor = combined_tensor.unsqueeze(0) + pos_encoding  # (1, seq_len, embed_dim)
        transformer_output = self.transformer_encoder(combined_tensor).squeeze(0)
        final_feature = transformer_output.mean(dim=0)                 # mean-pool over the sequence
        final_output = self.fc(final_feature)

        return final_output


if __name__ == "__main__":
    config = {
        'pts_embed_dim': 1024,   # dimension of each point-cloud embedding
        'pose_embed_dim': 256,   # dimension of each pose embedding
        'num_heads': 4,          # number of heads in multi-head attention
        'ffn_dim': 256,          # dimension of the feed-forward network
        'num_layers': 3,         # number of transformer encoder layers
        'max_seq_len': 10,       # maximum sequence length
        'output_dim': 2048,      # output feature dimension
    }

    encoder = TransformerSequenceEncoder(config)
    seq_len = 5
    pts_embedding_list = [torch.randn(config['pts_embed_dim']) for _ in range(seq_len)]
    pose_embedding_list = [torch.randn(config['pose_embed_dim']) for _ in range(seq_len)]
    output_feature = encoder.encode_sequence(pts_embedding_list, pose_embedding_list)
    print("Encoded Feature:", output_feature)
    print("Feature Shape:", output_feature.shape)
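
The commit title also mentions adding seq_feat in gf_view_finder, but that part of the diff is not shown here. Below is a minimal sketch of how the encoded sequence feature might be consumed by a view-finder module; the class name GradientFieldViewFinder, the pts_feat input, the fusion MLP, and pose_dim are illustrative assumptions, not the repository's actual code.

# Hypothetical sketch: wiring seq_feat into a view-finder module.
# All names and dimensions below are assumptions for illustration;
# the real gf_view_finder change is not visible in this diff.
import torch
from torch import nn

class GradientFieldViewFinder(nn.Module):
    def __init__(self, pts_feat_dim=1024, seq_feat_dim=2048, hidden_dim=512, pose_dim=9):
        super().__init__()
        # fuse the per-view point-cloud feature with the sequence feature
        self.fusion = nn.Sequential(
            nn.Linear(pts_feat_dim + seq_feat_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, pose_dim),  # regress a next-best-view pose
        )

    def forward(self, pts_feat, seq_feat):
        # seq_feat would come from TransformerSequenceEncoder.encode_sequence
        return self.fusion(torch.cat((pts_feat, seq_feat), dim=-1))

Under these assumptions, seq_feat is simply concatenated with the existing per-view feature before pose regression.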